[llvm] r369876 - [X86] Automatically generate stack folding tests. NFC

Roman Lebedev via llvm-commits <llvm-commits at lists.llvm.org>
Sun Aug 25 13:54:05 PDT 2019


I'm curious: is there an end goal to all of these massive test
regenerations as of late?
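
(For context: utils/update_llc_test_checks.py reruns llc on each test
file and rewrites the CHECK lines from llc's actual output, so the
assertions in the diff below are mechanically generated rather than
hand-written. A typical invocation, sketched here assuming a built llc
at build/bin/llc, looks something like:

    llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/X86/stack-folding-*.ll
)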

On Sun, Aug 25, 2019 at 11:47 PM Amaury Sechet via llvm-commits
<llvm-commits at lists.llvm.org> wrote:
>
> Author: deadalnix
> Date: Sun Aug 25 13:48:14 2019
> New Revision: 369876
>
> URL: http://llvm.org/viewvc/llvm-project?rev=369876&view=rev
> Log:
> [X86] Automatically generate stack folding tests. NFC
>
> Modified:
>     llvm/trunk/test/CodeGen/X86/stack-folding-3dnow.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-adx-x86_64.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-avx512bf16.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-avx512vp2intersect.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-bmi.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-bmi2.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-int-avx1.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-int-avx512.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-int-avx512vl.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-int-avx512vnni.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-int-sse42.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-lwp.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-mmx.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-sha.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-tbm.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-x86_64.ll
>     llvm/trunk/test/CodeGen/X86/stack-folding-xop.ll
>
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-3dnow.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-3dnow.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-3dnow.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-3dnow.ll Sun Aug 25 13:48:14 2019
> @@ -1,8 +1,16 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+3dnow | FileCheck %s
>
>  define x86_mmx @stack_fold_pavgusb(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pavgusb
> -  ;CHECK:       pavgusb {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pavgusb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pavgusb {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -10,8 +18,15 @@ define x86_mmx @stack_fold_pavgusb(x86_m
>  declare x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pf2id(x86_mmx %a) {
> -  ;CHECK-LABEL: stack_fold_pf2id
> -  ;CHECK:       pf2id {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pf2id:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pf2id {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pf2id(x86_mmx %a) nounwind readnone
>    ret x86_mmx %2
> @@ -19,8 +34,15 @@ define x86_mmx @stack_fold_pf2id(x86_mmx
>  declare x86_mmx @llvm.x86.3dnow.pf2id(x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pf2iw(x86_mmx %a) {
> -  ;CHECK-LABEL: stack_fold_pf2iw
> -  ;CHECK:       pf2iw {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pf2iw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pf2iw {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnowa.pf2iw(x86_mmx %a) nounwind readnone
>    ret x86_mmx %2
> @@ -28,8 +50,15 @@ define x86_mmx @stack_fold_pf2iw(x86_mmx
>  declare x86_mmx @llvm.x86.3dnowa.pf2iw(x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfacc(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfacc
> -  ;CHECK:       pfacc {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfacc:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfacc {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfacc(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -37,8 +66,15 @@ define x86_mmx @stack_fold_pfacc(x86_mmx
>  declare x86_mmx @llvm.x86.3dnow.pfacc(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfadd(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfadd
> -  ;CHECK:       pfadd {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfadd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfadd {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -46,8 +82,15 @@ define x86_mmx @stack_fold_pfadd(x86_mmx
>  declare x86_mmx @llvm.x86.3dnow.pfadd(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfcmpeq(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfcmpeq
> -  ;CHECK:       pfcmpeq {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfcmpeq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfcmpeq {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -55,8 +98,15 @@ define x86_mmx @stack_fold_pfcmpeq(x86_m
>  declare x86_mmx @llvm.x86.3dnow.pfcmpeq(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfcmpge(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfcmpge
> -  ;CHECK:       pfcmpge {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfcmpge:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfcmpge {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfcmpge(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -64,8 +114,15 @@ define x86_mmx @stack_fold_pfcmpge(x86_m
>  declare x86_mmx @llvm.x86.3dnow.pfcmpge(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfcmpgt(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfcmpgt
> -  ;CHECK:       pfcmpgt {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfcmpgt:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfcmpgt {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfcmpgt(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -73,8 +130,15 @@ define x86_mmx @stack_fold_pfcmpgt(x86_m
>  declare x86_mmx @llvm.x86.3dnow.pfcmpgt(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfmax(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfmax
> -  ;CHECK:       pfmax {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfmax:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfmax {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -82,8 +146,15 @@ define x86_mmx @stack_fold_pfmax(x86_mmx
>  declare x86_mmx @llvm.x86.3dnow.pfmax(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfmin(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfmin
> -  ;CHECK:       pfmin {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfmin:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfmin {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -91,8 +162,15 @@ define x86_mmx @stack_fold_pfmin(x86_mmx
>  declare x86_mmx @llvm.x86.3dnow.pfmin(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfmul(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfmul
> -  ;CHECK:       pfmul {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfmul:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfmul {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -100,8 +178,15 @@ define x86_mmx @stack_fold_pfmul(x86_mmx
>  declare x86_mmx @llvm.x86.3dnow.pfmul(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfnacc(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfnacc
> -  ;CHECK:       pfnacc {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfnacc:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfnacc {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnowa.pfnacc(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -109,8 +194,15 @@ define x86_mmx @stack_fold_pfnacc(x86_mm
>  declare x86_mmx @llvm.x86.3dnowa.pfnacc(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfpnacc(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfpnacc
> -  ;CHECK:       pfpnacc {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfpnacc:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfpnacc {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnowa.pfpnacc(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -118,8 +210,15 @@ define x86_mmx @stack_fold_pfpnacc(x86_m
>  declare x86_mmx @llvm.x86.3dnowa.pfpnacc(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfrcp(x86_mmx %a) {
> -  ;CHECK-LABEL: stack_fold_pfrcp
> -  ;CHECK:       pfrcp {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfrcp:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfrcp {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfrcp(x86_mmx %a) nounwind readnone
>    ret x86_mmx %2
> @@ -127,8 +226,15 @@ define x86_mmx @stack_fold_pfrcp(x86_mmx
>  declare x86_mmx @llvm.x86.3dnow.pfrcp(x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfrcpit1(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfrcpit1
> -  ;CHECK:       pfrcpit1 {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfrcpit1:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfrcpit1 {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfrcpit1(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -136,8 +242,15 @@ define x86_mmx @stack_fold_pfrcpit1(x86_
>  declare x86_mmx @llvm.x86.3dnow.pfrcpit1(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfrcpit2(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfrcpit2
> -  ;CHECK:       pfrcpit2 {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfrcpit2:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfrcpit2 {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfrcpit2(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -145,8 +258,15 @@ define x86_mmx @stack_fold_pfrcpit2(x86_
>  declare x86_mmx @llvm.x86.3dnow.pfrcpit2(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfrsqit1(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfrsqit1
> -  ;CHECK:       pfrsqit1 {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfrsqit1:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfrsqit1 {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfrsqit1(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -154,8 +274,15 @@ define x86_mmx @stack_fold_pfrsqit1(x86_
>  declare x86_mmx @llvm.x86.3dnow.pfrsqit1(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfrsqrt(x86_mmx %a) {
> -  ;CHECK-LABEL: stack_fold_pfrsqrt
> -  ;CHECK:       pfrsqrt {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfrsqrt:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfrsqrt {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfrsqrt(x86_mmx %a) nounwind readnone
>    ret x86_mmx %2
> @@ -163,8 +290,15 @@ define x86_mmx @stack_fold_pfrsqrt(x86_m
>  declare x86_mmx @llvm.x86.3dnow.pfrsqrt(x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfsub(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfsub
> -  ;CHECK:       pfsub {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfsub:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfsub {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -172,8 +306,15 @@ define x86_mmx @stack_fold_pfsub(x86_mmx
>  declare x86_mmx @llvm.x86.3dnow.pfsub(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pfsubr(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pfsubr
> -  ;CHECK:       pfsubr {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pfsubr:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pfsubr {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -181,8 +322,15 @@ define x86_mmx @stack_fold_pfsubr(x86_mm
>  declare x86_mmx @llvm.x86.3dnow.pfsubr(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pi2fd(x86_mmx %a) {
> -  ;CHECK-LABEL: stack_fold_pi2fd
> -  ;CHECK:       pi2fd {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pi2fd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pi2fd {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pi2fd(x86_mmx %a) nounwind readnone
>    ret x86_mmx %2
> @@ -190,8 +338,15 @@ define x86_mmx @stack_fold_pi2fd(x86_mmx
>  declare x86_mmx @llvm.x86.3dnow.pi2fd(x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pi2fw(x86_mmx %a) {
> -  ;CHECK-LABEL: stack_fold_pi2fw
> -  ;CHECK:       pi2fw {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pi2fw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pi2fw {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnowa.pi2fw(x86_mmx %a) nounwind readnone
>    ret x86_mmx %2
> @@ -199,8 +354,15 @@ define x86_mmx @stack_fold_pi2fw(x86_mmx
>  declare x86_mmx @llvm.x86.3dnowa.pi2fw(x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pmulhrw(x86_mmx %a, x86_mmx %b) {
> -  ;CHECK-LABEL: stack_fold_pmulhrw
> -  ;CHECK:       pmulhrw {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmulhrw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pmulhrw {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnow.pmulhrw(x86_mmx %a, x86_mmx %b) nounwind readnone
>    ret x86_mmx %2
> @@ -208,8 +370,16 @@ define x86_mmx @stack_fold_pmulhrw(x86_m
>  declare x86_mmx @llvm.x86.3dnow.pmulhrw(x86_mmx, x86_mmx) nounwind readnone
>
>  define x86_mmx @stack_fold_pswapd(x86_mmx %a) {
> -  ;CHECK-LABEL: stack_fold_pswapd
> -  ;CHECK:       pswapd {{-?[0-9]*}}(%rsp), {{%mm[0-7]}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pswapd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movq %mm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    pswapd {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    # mm0 = mem[1,0]
> +; CHECK-NEXT:    movq2dq %mm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call x86_mmx asm sideeffect "nop", "=y,~{mm1},~{mm2},~{mm3},~{mm4},~{mm5},~{mm6},~{mm7}"()
>    %2 = call x86_mmx @llvm.x86.3dnowa.pswapd(x86_mmx %a) nounwind readnone
>    ret x86_mmx %2
>
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-adx-x86_64.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-adx-x86_64.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-adx-x86_64.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-adx-x86_64.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+adx < %s | FileCheck %s --check-prefix=CHECK --check-prefix=ADX
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=-adx < %s | FileCheck %s --check-prefix=CHECK --check-prefix=NOADX
>
> @@ -10,8 +11,53 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
>  define i8 @stack_fold_addcarry_u32(i8 %a0, i32 %a1, i32 %a2, i8* %a3) {
> -  ;CHECK-LABEL: stack_fold_addcarry_u32
> -  ;CHECK:       adcl {{-?[0-9]*}}(%rsp), %{{.*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addcarry_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    addb $-1, %al
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Reload
> +; CHECK-NEXT:    adcl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
> +; CHECK-NEXT:    movl %edx, (%rcx)
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = call { i8, i32 } @llvm.x86.addcarry.32(i8 %a0, i32 %a1, i32 %a2)
>    %3 = extractvalue { i8, i32 } %2, 1
> @@ -22,8 +68,53 @@ define i8 @stack_fold_addcarry_u32(i8 %a
>  }
>
>  define i8 @stack_fold_addcarry_u64(i8 %a0, i64 %a1, i64 %a2, i8* %a3) {
> -  ;CHECK-LABEL: stack_fold_addcarry_u64
> -  ;CHECK:       adcq {{-?[0-9]*}}(%rsp), %{{.*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addcarry_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    addb $-1, %al
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
> +; CHECK-NEXT:    adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
> +; CHECK-NEXT:    movq %rdx, (%rcx)
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = call { i8, i64 } @llvm.x86.addcarry.64(i8 %a0, i64 %a1, i64 %a2)
>    %3 = extractvalue { i8, i64 } %2, 1
> @@ -34,8 +125,53 @@ define i8 @stack_fold_addcarry_u64(i8 %a
>  }
>
>  define i8 @stack_fold_addcarryx_u32(i8 %a0, i32 %a1, i32 %a2, i8* %a3) {
> -  ;CHECK-LABEL: stack_fold_addcarryx_u32
> -  ;CHECK:       adcl {{-?[0-9]*}}(%rsp), %{{.*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addcarryx_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    addb $-1, %al
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Reload
> +; CHECK-NEXT:    adcl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
> +; CHECK-NEXT:    movl %edx, (%rcx)
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = call { i8, i32 } @llvm.x86.addcarry.32(i8 %a0, i32 %a1, i32 %a2)
>    %3 = extractvalue { i8, i32 } %2, 1
> @@ -46,8 +182,53 @@ define i8 @stack_fold_addcarryx_u32(i8 %
>  }
>
>  define i8 @stack_fold_addcarryx_u64(i8 %a0, i64 %a1, i64 %a2, i8* %a3) {
> -  ;CHECK-LABEL: stack_fold_addcarryx_u64
> -  ;CHECK:       adcq {{-?[0-9]*}}(%rsp), %{{.*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addcarryx_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    addb $-1, %al
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
> +; CHECK-NEXT:    adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
> +; CHECK-NEXT:    movq %rdx, (%rcx)
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = call { i8, i64 } @llvm.x86.addcarry.64(i8 %a0, i64 %a1, i64 %a2)
>    %3 = extractvalue { i8, i64 } %2, 1
> @@ -58,8 +239,53 @@ define i8 @stack_fold_addcarryx_u64(i8 %
>  }
>
>  define i8 @stack_fold_subborrow_u32(i8 %a0, i32 %a1, i32 %a2, i8* %a3) {
> -  ;CHECK-LABEL: stack_fold_subborrow_u32
> -  ;CHECK:       sbbl {{-?[0-9]*}}(%rsp), %{{.*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subborrow_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    addb $-1, %al
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Reload
> +; CHECK-NEXT:    sbbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
> +; CHECK-NEXT:    movl %edx, (%rcx)
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = call { i8, i32 } @llvm.x86.subborrow.32(i8 %a0, i32 %a1, i32 %a2)
>    %3 = extractvalue { i8, i32 } %2, 1
> @@ -70,8 +296,53 @@ define i8 @stack_fold_subborrow_u32(i8 %
>  }
>
>  define i8 @stack_fold_subborrow_u64(i8 %a0, i64 %a1, i64 %a2, i8* %a3) {
> -  ;CHECK-LABEL: stack_fold_subborrow_u64
> -  ;CHECK:       sbbq {{-?[0-9]*}}(%rsp), %{{.*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subborrow_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    addb $-1, %al
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
> +; CHECK-NEXT:    sbbq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
> +; CHECK-NEXT:    movq %rdx, (%rcx)
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = call { i8, i64 } @llvm.x86.subborrow.64(i8 %a0, i64 %a1, i64 %a2)
>    %3 = extractvalue { i8, i64 } %2, 1
>
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-avx512bf16.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-avx512bf16.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-avx512bf16.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-avx512bf16.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512bf16,+avx512vl < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -9,8 +10,14 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
>  define <32 x i16> @stack_fold_cvtne2ps2bf16(<16 x float> %a0, <16 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cvtne2ps2bf16:
> -  ;CHECK:       vcvtne2ps2bf16 {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtne2ps2bf16:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtne2ps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <32 x i16> @llvm.x86.avx512bf16.cvtne2ps2bf16.512(<16 x float> %a0, <16 x float> %a1)
>    ret <32 x i16> %2
> @@ -19,7 +26,16 @@ declare <32 x i16> @llvm.x86.avx512bf16.
>
>  define <32 x i16> @stack_fold_cvtne2ps2bf16_mask(<16 x float> %a0, <16 x float> %a1, <32 x i16>* %passthru, i32 %U) {
>  ; CHECK-LABEL: stack_fold_cvtne2ps2bf16_mask:
> -; CHECK:       vcvtne2ps2bf16 {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %esi, %k1
> +; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm2
> +; CHECK-NEXT:    vcvtne2ps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <32 x i16> @llvm.x86.avx512bf16.cvtne2ps2bf16.512(<16 x float> %a0, <16 x float> %a1)
>    %3 = bitcast i32 %U to <32 x i1>
> @@ -31,7 +47,14 @@ define <32 x i16> @stack_fold_cvtne2ps2b
>
>  define <32 x i16> @stack_fold_cvtne2ps2bf16_maskz(<16 x float> %a0, <16 x float> %a1, i32 %U) {
>  ; CHECK-LABEL: stack_fold_cvtne2ps2bf16_maskz:
> -; CHECK:       vcvtne2ps2bf16 {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %edi, %k1
> +; CHECK-NEXT:    vcvtne2ps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <32 x i16> @llvm.x86.avx512bf16.cvtne2ps2bf16.512(<16 x float> %a0, <16 x float> %a1)
>    %3 = bitcast i32 %U to <32 x i1>
> @@ -41,7 +64,13 @@ define <32 x i16> @stack_fold_cvtne2ps2b
>
>  define <16 x i16> @stack_fold_cvtneps2bf16(<16 x float> %a0) {
>  ; CHECK-LABEL: stack_fold_cvtneps2bf16:
> -; CHECK:       vcvtneps2bf16 {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtneps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <16 x i16> @llvm.x86.avx512bf16.cvtneps2bf16.512(<16 x float> %a0)
>    ret <16 x i16> %2
> @@ -50,7 +79,16 @@ declare <16 x i16> @llvm.x86.avx512bf16.
>
>  define <16 x i16> @stack_fold_cvtneps2bf16_mask(<16 x float> %a0, <16 x i16>* %passthru, i16 %U) {
>  ; CHECK-LABEL: stack_fold_cvtneps2bf16_mask:
> -; CHECK:       vcvtneps2bf16 {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %esi, %k1
> +; CHECK-NEXT:    vmovaps (%rdi), %ymm1
> +; CHECK-NEXT:    vcvtneps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %ymm1, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <16 x i16> @llvm.x86.avx512bf16.cvtneps2bf16.512(<16 x float> %a0)
>    %3 = bitcast i16 %U to <16 x i1>
> @@ -62,7 +100,14 @@ define <16 x i16> @stack_fold_cvtneps2bf
>
>  define <16 x i16> @stack_fold_cvtneps2bf16_maskz(<16 x float> %a0, i16 %U) {
>  ; CHECK-LABEL: stack_fold_cvtneps2bf16_maskz:
> -; CHECK:       vcvtneps2bf16 {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %edi, %k1
> +; CHECK-NEXT:    vcvtneps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <16 x i16> @llvm.x86.avx512bf16.cvtneps2bf16.512(<16 x float> %a0)
>    %3 = bitcast i16 %U to <16 x i1>
> @@ -72,7 +117,13 @@ define <16 x i16> @stack_fold_cvtneps2bf
>
>  define <16 x float> @stack_fold_vdpbf16ps(<16 x float> %a0, <16 x i32> %a1, <16 x i32> %a2) {
>  ; CHECK-LABEL: stack_fold_vdpbf16ps:
> -; CHECK:       vdpbf16ps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdpbf16ps {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <16 x float> @llvm.x86.avx512bf16.dpbf16ps.512(<16 x float> %a0, <16 x i32> %a1, <16 x i32> %a2)
>    ret <16 x float> %2
> @@ -81,7 +132,16 @@ declare <16 x float> @llvm.x86.avx512bf1
>
>  define <16 x float> @stack_fold_vdpbf16ps_mask(<16 x float>* %a0, <16 x i32> %a1, <16 x i32> %a2, <16 x float>* %passthru, i16 %U) {
>  ; CHECK-LABEL: stack_fold_vdpbf16ps_mask:
> -; CHECK:       vdpbf16ps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %zmm2
> +; CHECK-NEXT:    kmovd %edx, %k1
> +; CHECK-NEXT:    vdpbf16ps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ; load needed to keep the operation from being scheduled above the asm block
>    %2 = load <16 x float>, <16 x float>* %a0
> @@ -93,7 +153,14 @@ define <16 x float> @stack_fold_vdpbf16p
>
>  define <16 x float> @stack_fold_vdpbf16ps_maskz(<16 x float> %a0, <16 x i32> %a1, <16 x i32> %a2, i16* %U) {
>  ; CHECK-LABEL: stack_fold_vdpbf16ps_maskz:
> -; CHECK:       vdpbf16ps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw (%rdi), %k1
> +; CHECK-NEXT:    vdpbf16ps {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <16 x float> @llvm.x86.avx512bf16.dpbf16ps.512(<16 x float> %a0, <16 x i32> %a1, <16 x i32> %a2)
>    %3 = load i16, i16* %U
> @@ -105,8 +172,14 @@ define <16 x float> @stack_fold_vdpbf16p
>
>
>  define <16 x i16> @stack_fold_cvtne2ps2bf16_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cvtne2ps2bf16_ymm:
> -  ;CHECK:       vcvtne2ps2bf16 {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtne2ps2bf16_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtne2ps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx512bf16.cvtne2ps2bf16.256(<8 x float> %a0, <8 x float> %a1)
>    ret <16 x i16> %2
> @@ -115,7 +188,16 @@ declare <16 x i16> @llvm.x86.avx512bf16.
>
>  define <16 x i16> @stack_fold_cvtne2ps2bf16_mask_ymm(<8 x float> %a0, <8 x float> %a1, <16 x i16>* %passthru, i16 %U) {
>  ; CHECK-LABEL: stack_fold_cvtne2ps2bf16_mask_ymm:
> -; CHECK:       vcvtne2ps2bf16 {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %esi, %k1
> +; CHECK-NEXT:    vmovdqa (%rdi), %ymm2
> +; CHECK-NEXT:    vcvtne2ps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 {%k1} # 32-byte Folded Reload
> +; CHECK-NEXT:    vmovdqa %ymm2, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx512bf16.cvtne2ps2bf16.256(<8 x float> %a0, <8 x float> %a1)
>    %3 = bitcast i16 %U to <16 x i1>
> @@ -127,7 +209,14 @@ define <16 x i16> @stack_fold_cvtne2ps2b
>
>  define <16 x i16> @stack_fold_cvtne2ps2bf16_maskz_ymm(<8 x float> %a0, <8 x float> %a1, i16 %U) {
>  ; CHECK-LABEL: stack_fold_cvtne2ps2bf16_maskz_ymm:
> -; CHECK:       vcvtne2ps2bf16 {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %edi, %k1
> +; CHECK-NEXT:    vcvtne2ps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 {%k1} {z} # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx512bf16.cvtne2ps2bf16.256(<8 x float> %a0, <8 x float> %a1)
>    %3 = bitcast i16 %U to <16 x i1>
> @@ -137,7 +226,14 @@ define <16 x i16> @stack_fold_cvtne2ps2b
>
>  define <8 x i16> @stack_fold_cvtneps2bf16_ymm(<8 x float> %a0) {
>  ; CHECK-LABEL: stack_fold_cvtneps2bf16_ymm:
> -; CHECK:       vcvtneps2bf16y {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtneps2bf16y {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <8 x i16> @llvm.x86.avx512bf16.cvtneps2bf16.256(<8 x float> %a0)
>    ret <8 x i16> %2
> @@ -146,7 +242,17 @@ declare <8 x i16> @llvm.x86.avx512bf16.c
>
>  define <8 x i16> @stack_fold_cvtneps2bf16_mask_ymm(<8 x float> %a0, <8 x i16>* %passthru, i8 %U) {
>  ; CHECK-LABEL: stack_fold_cvtneps2bf16_mask_ymm:
> -; CHECK:       vcvtneps2bf16y {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %esi, %k1
> +; CHECK-NEXT:    vmovaps (%rdi), %xmm1
> +; CHECK-NEXT:    vcvtneps2bf16y {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 {%k1} # 32-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %xmm1, %xmm0
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <8 x i16> @llvm.x86.avx512bf16.cvtneps2bf16.256(<8 x float> %a0)
>    %3 = bitcast i8 %U to <8 x i1>
> @@ -158,7 +264,15 @@ define <8 x i16> @stack_fold_cvtneps2bf1
>
>  define <8 x i16> @stack_fold_cvtneps2bf16_maskz_ymm(<8 x float> %a0, i8 %U) {
>  ; CHECK-LABEL: stack_fold_cvtneps2bf16_maskz_ymm:
> -; CHECK:       vcvtneps2bf16y {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %edi, %k1
> +; CHECK-NEXT:    vcvtneps2bf16y {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 {%k1} {z} # 32-byte Folded Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <8 x i16> @llvm.x86.avx512bf16.cvtneps2bf16.256(<8 x float> %a0)
>    %3 = bitcast i8 %U to <8 x i1>
> @@ -168,7 +282,13 @@ define <8 x i16> @stack_fold_cvtneps2bf1
>
>  define <8 x float> @stack_fold_vdpbf16ps_ymm(<8 x float> %a0, <8 x i32> %a1, <8 x i32> %a2) {
>  ; CHECK-LABEL: stack_fold_vdpbf16ps_ymm:
> -; CHECK:       vdpbf16ps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdpbf16ps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <8 x float> @llvm.x86.avx512bf16.dpbf16ps.256(<8 x float> %a0, <8 x i32> %a1, <8 x i32> %a2)
>    ret <8 x float> %2
> @@ -177,7 +297,16 @@ declare <8 x float> @llvm.x86.avx512bf16
>
>  define <8 x float> @stack_fold_vdpbf16ps_mask_ymm(<8 x float>* %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x float>* %passthru, i8 %U) {
>  ; CHECK-LABEL: stack_fold_vdpbf16ps_mask_ymm:
> -; CHECK:       vdpbf16ps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %ymm2
> +; CHECK-NEXT:    kmovd %edx, %k1
> +; CHECK-NEXT:    vdpbf16ps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 {%k1} # 32-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %ymm2, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ; load needed to keep the operation from being scheduled above the asm block
>    %2 = load <8 x float>, <8 x float>* %a0
> @@ -189,7 +318,15 @@ define <8 x float> @stack_fold_vdpbf16ps
>
>  define <8 x float> @stack_fold_vdpbf16ps_maskz_ymm(<8 x float> %a0, <8 x i32> %a1, <8 x i32> %a2, i8* %U) {
>  ; CHECK-LABEL: stack_fold_vdpbf16ps_maskz_ymm:
> -; CHECK:       vdpbf16ps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movzbl (%rdi), %eax
> +; CHECK-NEXT:    kmovd %eax, %k1
> +; CHECK-NEXT:    vdpbf16ps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 {%k1} {z} # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <8 x float> @llvm.x86.avx512bf16.dpbf16ps.256(<8 x float> %a0, <8 x i32> %a1, <8 x i32> %a2)
>    %3 = load i8, i8* %U
> @@ -202,8 +339,14 @@ define <8 x float> @stack_fold_vdpbf16ps
>
>
>  define <8 x i16> @stack_fold_cvtne2ps2bf16_xmm(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cvtne2ps2bf16_xmm:
> -  ;CHECK:       vcvtne2ps2bf16 {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtne2ps2bf16_xmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtne2ps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.avx512bf16.cvtne2ps2bf16.128(<4 x float> %a0, <4 x float> %a1)
>    ret <8 x i16> %2
> @@ -212,7 +355,16 @@ declare <8 x i16> @llvm.x86.avx512bf16.c
>
>  define <8 x i16> @stack_fold_cvtne2ps2bf16_mask_xmm(<4 x float> %a0, <4 x float> %a1, <8 x i16>* %passthru, i8 %U) {
>  ; CHECK-LABEL: stack_fold_cvtne2ps2bf16_mask_xmm:
> -; CHECK:       vcvtne2ps2bf16 {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %esi, %k1
> +; CHECK-NEXT:    vmovdqa (%rdi), %xmm2
> +; CHECK-NEXT:    vcvtne2ps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 {%k1} # 16-byte Folded Reload
> +; CHECK-NEXT:    vmovdqa %xmm2, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.avx512bf16.cvtne2ps2bf16.128(<4 x float> %a0, <4 x float> %a1)
>    %3 = bitcast i8 %U to <8 x i1>
> @@ -224,7 +376,14 @@ define <8 x i16> @stack_fold_cvtne2ps2bf
>
>  define <8 x i16> @stack_fold_cvtne2ps2bf16_maskz_xmm(<4 x float> %a0, <4 x float> %a1, i8 %U) {
>  ; CHECK-LABEL: stack_fold_cvtne2ps2bf16_maskz_xmm:
> -; CHECK:       vcvtne2ps2bf16 {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %edi, %k1
> +; CHECK-NEXT:    vcvtne2ps2bf16 {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 {%k1} {z} # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.avx512bf16.cvtne2ps2bf16.128(<4 x float> %a0, <4 x float> %a1)
>    %3 = bitcast i8 %U to <8 x i1>
> @@ -234,7 +393,13 @@ define <8 x i16> @stack_fold_cvtne2ps2bf
>
>  define <8 x i16> @stack_fold_cvtneps2bf16_xmm(<4 x float> %a0) {
>  ; CHECK-LABEL: stack_fold_cvtneps2bf16_xmm:
> -; CHECK:       vcvtneps2bf16x {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtneps2bf16x {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <8 x i16> @llvm.x86.avx512bf16.mask.cvtneps2bf16.128(<4 x float> %a0, <8 x i16> undef, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
>    ret <8 x i16> %2
> @@ -243,7 +408,16 @@ declare <8 x i16> @llvm.x86.avx512bf16.m
>
>  define <8 x i16> @stack_fold_cvtneps2bf16_mask_xmm(<4 x float> %a0, <8 x i16>* %passthru, i8 %U) {
>  ; CHECK-LABEL: stack_fold_cvtneps2bf16_mask_xmm:
> -; CHECK:       vcvtneps2bf16x {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %xmm1
> +; CHECK-NEXT:    kmovd %esi, %k1
> +; CHECK-NEXT:    vcvtneps2bf16x {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 {%k1} # 16-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = load <8 x i16>, <8 x i16>* %passthru
>    %3 = bitcast i8 %U to <8 x i1>
> @@ -254,7 +428,14 @@ define <8 x i16> @stack_fold_cvtneps2bf1
>
>  define <8 x i16> @stack_fold_cvtneps2bf16_maskz_xmm(<4 x float> %a0, i8 %U) {
>  ; CHECK-LABEL: stack_fold_cvtneps2bf16_maskz_xmm:
> -; CHECK:       vcvtneps2bf16x {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovd %edi, %k1
> +; CHECK-NEXT:    vcvtneps2bf16x {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 {%k1} {z} # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast i8 %U to <8 x i1>
>    %3 = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
> @@ -264,7 +445,13 @@ define <8 x i16> @stack_fold_cvtneps2bf1
>
>  define <4 x float> @stack_fold_vdpbf16ps_xmm(<4 x float> %a0, <4 x i32> %a1, <4 x i32> %a2) {
>  ; CHECK-LABEL: stack_fold_vdpbf16ps_xmm:
> -; CHECK:       vdpbf16ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdpbf16ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <4 x float> @llvm.x86.avx512bf16.dpbf16ps.128(<4 x float> %a0, <4 x i32> %a1, <4 x i32> %a2)
>    ret <4 x float> %2
> @@ -273,7 +460,16 @@ declare <4 x float> @llvm.x86.avx512bf16
>
>  define <4 x float> @stack_fold_vdpbf16ps_mask_xmm(<4 x float>* %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x float>* %passthru, i8 %U) {
>  ; CHECK-LABEL: stack_fold_vdpbf16ps_mask_xmm:
> -; CHECK:       vdpbf16ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %xmm2
> +; CHECK-NEXT:    kmovd %edx, %k1
> +; CHECK-NEXT:    vdpbf16ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 {%k1} # 16-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %xmm2, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ; load needed to keep the operation from being scheduled above the asm block
>    %2 = load <4 x float>, <4 x float>* %a0
> @@ -286,7 +482,15 @@ define <4 x float> @stack_fold_vdpbf16ps
>
>  define <4 x float> @stack_fold_vdpbf16ps_maskz_xmm(<4 x float> %a0, <4 x i32> %a1, <4 x i32> %a2, i8* %U) {
>  ; CHECK-LABEL: stack_fold_vdpbf16ps_maskz_xmm:
> -; CHECK:       vdpbf16ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movzbl (%rdi), %eax
> +; CHECK-NEXT:    kmovd %eax, %k1
> +; CHECK-NEXT:    vdpbf16ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 {%k1} {z} # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = tail call <4 x float> @llvm.x86.avx512bf16.dpbf16ps.128(<4 x float> %a0, <4 x i32> %a1, <4 x i32> %a2)
>    %3 = load i8, i8* %U
>
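A gloss on the pattern these regenerated checks now make explicit: every test
runs a sideeffecting "nop" asm that clobbers all vector registers other than
the operands, forcing a spill, and the CHECK lines pin down that the reload
is folded into the instruction's memory operand rather than emitted as a
separate vmovaps/vmovups. For the _mask_ variants the passthru load sits
after the asm block on purpose (per the "load needed" comment above) so the
folded op cannot be hoisted over the spill. Below is a minimal sketch of a
merge-masked case, condensed from stack_fold_cvtneps2bf16_mask_ymm; the name
@mask_fold_sketch is illustrative, while the intrinsic and the constraint
string are the ones used in the tests above:

  define <8 x i16> @mask_fold_sketch(<8 x float> %a0, <8 x i16>* %passthru, i8 %U) {
    ; clobber every vector register so %a0 must be spilled across the asm
    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
    ; the 32-byte reload of %a0 should fold into vcvtneps2bf16y
    %2 = tail call <8 x i16> @llvm.x86.avx512bf16.cvtneps2bf16.256(<8 x float> %a0)
    ; kept after the asm so the folded op can't be scheduled above the spill
    %3 = load <8 x i16>, <8 x i16>* %passthru
    ; merge-masking: this select becomes the {%k1} write-mask on the folded
    ; instruction, with the passthru preloaded into the destination register
    %4 = bitcast i8 %U to <8 x i1>
    %5 = select <8 x i1> %4, <8 x i16> %2, <8 x i16> %3
    ret <8 x i16> %5
  }
  declare <8 x i16> @llvm.x86.avx512bf16.cvtneps2bf16.256(<8 x float>)

The select is exactly what the generated checks show as the {%k1} operand,
which is why the masked bodies gain a kmovd and a vmovaps of the passthru.
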
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-avx512vp2intersect.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-avx512vp2intersect.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-avx512vp2intersect.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-avx512vp2intersect.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vp2intersect,+avx512vl < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -5,7 +6,17 @@ target triple = "x86_64-unknown-unknown"
>
>  define void @stack_fold_vp2intersectd(<16 x i32>* %a, <16 x i32> %b, <16 x i1>* nocapture %m0, <16 x i1>* nocapture %m1) {
>  ; CHECK-LABEL: stack_fold_vp2intersectd:
> -; CHECK:    vp2intersectd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %zmm0
> +; CHECK-NEXT:    vp2intersectd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k0 # 64-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k0, (%rsi)
> +; CHECK-NEXT:    kmovw %k1, (%rdx)
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = load <16 x i32>, <16 x i32>* %a
>    %3 = tail call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %2, <16 x i32> %b)
> @@ -19,7 +30,19 @@ declare { <16 x i1>, <16 x i1> } @llvm.x
>
>  define void @stack_fold_vp2intersectq(<8 x i64>* %a, <8 x i64> %b, <8 x i1>* nocapture %m0, <8 x i1>* nocapture %m1) {
>  ; CHECK-LABEL: stack_fold_vp2intersectq:
> -; CHECK:    vp2intersectq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 64-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %zmm0
> +; CHECK-NEXT:    vp2intersectq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k0 # 64-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k1, %eax
> +; CHECK-NEXT:    kmovw %k0, %ecx
> +; CHECK-NEXT:    movb %cl, (%rsi)
> +; CHECK-NEXT:    movb %al, (%rdx)
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = load <8 x i64>, <8 x i64>* %a
>    %3 = tail call { <8 x i1>, <8 x i1> } @llvm.x86.avx512.vp2intersect.q.512(<8 x i64> %2, <8 x i64> %b)
> @@ -33,7 +56,19 @@ declare { <8 x i1>, <8 x i1> } @llvm.x86
>
>  define void @stack_fold_vp2intersectd_256(<8 x i32>* %a, <8 x i32> %b, <8 x i1>* nocapture %m0, <8 x i1>* nocapture %m1) {
>  ; CHECK-LABEL: stack_fold_vp2intersectd_256:
> -; CHECK:    vp2intersectd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %ymm0
> +; CHECK-NEXT:    vp2intersectd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %k0 # 32-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k1, %eax
> +; CHECK-NEXT:    kmovw %k0, %ecx
> +; CHECK-NEXT:    movb %cl, (%rsi)
> +; CHECK-NEXT:    movb %al, (%rdx)
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = load <8 x i32>, <8 x i32>* %a
>    %3 = tail call { <8 x i1>, <8 x i1> } @llvm.x86.avx512.vp2intersect.d.256(<8 x i32> %2, <8 x i32> %b)
> @@ -47,7 +82,19 @@ declare { <8 x i1>, <8 x i1> } @llvm.x86
>
>  define void @stack_fold_vp2intersectq_256(<4 x i64>* %a, <4 x i64> %b, <4 x i1>* nocapture %m0, <4 x i1>* nocapture %m1) {
>  ; CHECK-LABEL: stack_fold_vp2intersectq_256:
> -; CHECK:    vp2intersectq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 32-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %ymm0
> +; CHECK-NEXT:    vp2intersectq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %k0 # 32-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k1, %eax
> +; CHECK-NEXT:    kmovw %k0, %ecx
> +; CHECK-NEXT:    movb %cl, (%rsi)
> +; CHECK-NEXT:    movb %al, (%rdx)
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = load <4 x i64>, <4 x i64>* %a
>    %3 = tail call { <4 x i1>, <4 x i1> } @llvm.x86.avx512.vp2intersect.q.256(<4 x i64> %2, <4 x i64> %b)
> @@ -61,7 +108,18 @@ declare { <4 x i1>, <4 x i1> } @llvm.x86
>
>  define void @stack_fold_vp2intersectd_128(<4 x i32>* %a, <4 x i32> %b, <4 x i1>* nocapture %m0, <4 x i1>* nocapture %m1) {
>  ; CHECK-LABEL: stack_fold_vp2intersectd_128:
> -; CHECK:    vp2intersectd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %xmm0
> +; CHECK-NEXT:    vp2intersectd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %k0 # 16-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k1, %eax
> +; CHECK-NEXT:    kmovw %k0, %ecx
> +; CHECK-NEXT:    movb %cl, (%rsi)
> +; CHECK-NEXT:    movb %al, (%rdx)
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = load <4 x i32>, <4 x i32>* %a
>    %3 = tail call { <4 x i1>, <4 x i1> } @llvm.x86.avx512.vp2intersect.d.128(<4 x i32> %2, <4 x i32> %b)
> @@ -75,7 +133,18 @@ declare { <4 x i1>, <4 x i1> } @llvm.x86
>
>  define void @stack_fold_vp2intersectq_128(<2 x i64>* %a, <2 x i64> %b, <2 x i1>* nocapture %m0, <2 x i1>* nocapture %m1) {
>  ; CHECK-LABEL: stack_fold_vp2intersectq_128:
> -; CHECK:    vp2intersectq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %xmm0
> +; CHECK-NEXT:    vp2intersectq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %k0 # 16-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k1, %eax
> +; CHECK-NEXT:    kmovw %k0, %ecx
> +; CHECK-NEXT:    movb %cl, (%rsi)
> +; CHECK-NEXT:    movb %al, (%rdx)
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = load <2 x i64>, <2 x i64>* %a
>    %3 = tail call { <2 x i1>, <2 x i1> } @llvm.x86.avx512.vp2intersect.q.128(<2 x i64> %2, <2 x i64> %b)
>
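One wrinkle specific to this file: the vp2intersect intrinsics return two
mask vectors at once, which land in a k-register pair, so each regenerated
body reads back both k0 and k1 and stores them separately. Roughly the shape
of the 128-bit q test is sketched below; the extractvalue/store tail is
reconstructed from what the CHECK lines imply (the hunks above cut it off),
and @vp2intersect_sketch is an illustrative name, so treat this as an
approximation rather than the verbatim test:

  define void @vp2intersect_sketch(<2 x i64>* %a, <2 x i64> %b, <2 x i1>* %m0, <2 x i1>* %m1) {
    ; clobbering the vector registers forces %b to be spilled; its reload is
    ; what folds into vp2intersectq's memory operand
    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
    ; the load stays after the asm so the intersect can't be scheduled above it
    %2 = load <2 x i64>, <2 x i64>* %a
    ; one call yields both masks; they come back in the k0/k1 register pair
    %3 = tail call { <2 x i1>, <2 x i1> } @llvm.x86.avx512.vp2intersect.q.128(<2 x i64> %2, <2 x i64> %b)
    %4 = extractvalue { <2 x i1>, <2 x i1> } %3, 0
    %5 = extractvalue { <2 x i1>, <2 x i1> } %3, 1
    store <2 x i1> %4, <2 x i1>* %m0
    store <2 x i1> %5, <2 x i1>* %m1
    ret void
  }
  declare { <2 x i1>, <2 x i1> } @llvm.x86.avx512.vp2intersect.q.128(<2 x i64>, <2 x i64>)
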
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-bmi.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-bmi.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-bmi.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-bmi.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+bmi < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -9,8 +10,46 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
>  define i32 @stack_fold_andn_u32(i32 %a0, i32 %a1) {
> -  ;CHECK-LABEL: stack_fold_andn_u32
> -  ;CHECK:       andnl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andn_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    andnl {{[-0-9]+}}(%r{{[sb]}}p), %eax, %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = xor i32 %a0, -1
>    %3 = and i32 %a1, %2
> @@ -18,8 +57,46 @@ define i32 @stack_fold_andn_u32(i32 %a0,
>  }
>
>  define i64 @stack_fold_andn_u64(i64 %a0, i64 %a1) {
> -  ;CHECK-LABEL: stack_fold_andn_u64
> -  ;CHECK:       andnq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andn_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
> +; CHECK-NEXT:    andnq {{[-0-9]+}}(%r{{[sb]}}p), %rax, %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = xor i64 %a0, -1
>    %3 = and i64 %a1, %2
> @@ -27,9 +104,46 @@ define i64 @stack_fold_andn_u64(i64 %a0,
>  }
>
>  define i32 @stack_fold_bextr_u32(i32 %a0, i32 %a1) {
> -  ;CHECK-LABEL: stack_fold_bextr_u32
> -  ;CHECK:       # %bb.0:
> -  ;CHECK:       bextrl %eax, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_bextr_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    bextrl %eax, {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %a0, i32 %a1)
>    ret i32 %2
> @@ -37,9 +151,46 @@ define i32 @stack_fold_bextr_u32(i32 %a0
>  declare i32 @llvm.x86.bmi.bextr.32(i32, i32)
>
>  define i64 @stack_fold_bextr_u64(i64 %a0, i64 %a1) {
> -  ;CHECK-LABEL: stack_fold_bextr_u64
> -  ;CHECK:       # %bb.0:
> -  ;CHECK:       bextrq %rax, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_bextr_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
> +; CHECK-NEXT:    bextrq %rax, {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i64 @llvm.x86.bmi.bextr.64(i64 %a0, i64 %a1)
>    ret i64 %2
> @@ -47,8 +198,44 @@ define i64 @stack_fold_bextr_u64(i64 %a0
>  declare i64 @llvm.x86.bmi.bextr.64(i64, i64)
>
>  define i32 @stack_fold_blsi_u32(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_blsi_u32
> -  ;CHECK:       blsil {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blsi_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blsil {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sub i32 0, %a0
>    %3 = and i32 %2, %a0
> @@ -56,8 +243,44 @@ define i32 @stack_fold_blsi_u32(i32 %a0)
>  }
>
>  define i64 @stack_fold_blsi_u64(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_blsi_u64
> -  ;CHECK:       blsiq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blsi_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blsiq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sub i64 0, %a0
>    %3 = and i64 %2, %a0
> @@ -65,8 +288,44 @@ define i64 @stack_fold_blsi_u64(i64 %a0)
>  }
>
>  define i32 @stack_fold_blsmsk_u32(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_blsmsk_u32
> -  ;CHECK:       blsmskl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blsmsk_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blsmskl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sub i32 %a0, 1
>    %3 = xor i32 %2, %a0
> @@ -74,8 +333,44 @@ define i32 @stack_fold_blsmsk_u32(i32 %a
>  }
>
>  define i64 @stack_fold_blsmsk_u64(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_blsmsk_u64
> -  ;CHECK:       blsmskq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blsmsk_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blsmskq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sub i64 %a0, 1
>    %3 = xor i64 %2, %a0
> @@ -83,8 +378,44 @@ define i64 @stack_fold_blsmsk_u64(i64 %a
>  }
>
>  define i32 @stack_fold_blsr_u32(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_blsr_u32
> -  ;CHECK:       blsrl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blsr_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blsrl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sub i32 %a0, 1
>    %3 = and i32 %2, %a0
> @@ -92,8 +423,44 @@ define i32 @stack_fold_blsr_u32(i32 %a0)
>  }
>
>  define i64 @stack_fold_blsr_u64(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_blsr_u64
> -  ;CHECK:       blsrq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blsr_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blsrq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sub i64 %a0, 1
>    %3 = and i64 %2, %a0
> @@ -103,8 +470,44 @@ define i64 @stack_fold_blsr_u64(i64 %a0)
>  ;TODO stack_fold_tzcnt_u16
>
>  define i32 @stack_fold_tzcnt_u32(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_tzcnt_u32
> -  ;CHECK:       tzcntl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_tzcnt_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    tzcntl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i32 @llvm.cttz.i32(i32 %a0, i1 0)
>    ret i32 %2
> @@ -112,8 +515,44 @@ define i32 @stack_fold_tzcnt_u32(i32 %a0
>  declare i32 @llvm.cttz.i32(i32, i1)
>
>  define i64 @stack_fold_tzcnt_u64(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_tzcnt_u64
> -  ;CHECK:       tzcntq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_tzcnt_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    tzcntq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i64 @llvm.cttz.i64(i64 %a0, i1 0)
>    ret i64 %2
>
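The scalar BMI tests differ from the vector files above in one important way:
their clobber list covers the general-purpose registers, including the
callee-saved rbx/rbp/r12-r15, so every regenerated body carries the full
push/pop plus .cfi prologue and epilogue. That frame, not the folding itself,
accounts for most of the new CHECK lines. The underlying pattern is
unchanged; for andn it is just the ~a0 & a1 expansion, copied from
stack_fold_andn_u32 above with comments added (@andn_sketch is an
illustrative name):

  define i32 @andn_sketch(i32 %a0, i32 %a1) {
    ; clobber all GPRs, callee-saved ones included, hence the push/pop frames
    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
    ; ~a0 & a1 is matched to ANDN: one operand is reloaded into %eax and the
    ; other's reload is folded straight into andnl's memory operand
    %2 = xor i32 %a0, -1
    %3 = and i32 %a1, %2
    ret i32 %3
  }

The same framing applies to the bmi2 file that follows: bzhi/pdep/pext use
the identical GPR clobber, so their diffs are dominated by the same prologue
and epilogue checks.
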
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-bmi2.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-bmi2.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-bmi2.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-bmi2.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+bmi,+bmi2 < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -9,8 +10,46 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
>  define i32 @stack_fold_bzhi_u32(i32 %a0, i32 %a1)   {
> -  ;CHECK-LABEL: stack_fold_bzhi_u32
> -  ;CHECK:       bzhil %eax, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_bzhi_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    bzhil %eax, {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a0, i32 %a1)
>    ret i32 %2
> @@ -18,8 +57,46 @@ define i32 @stack_fold_bzhi_u32(i32 %a0,
>  declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)
>
>  define i64 @stack_fold_bzhi_u64(i64 %a0, i64 %a1)   {
> -  ;CHECK-LABEL: stack_fold_bzhi_u64
> -  ;CHECK:       bzhiq %rax, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_bzhi_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
> +; CHECK-NEXT:    bzhiq %rax, {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a0, i64 %a1)
>    ret i64 %2
> @@ -27,8 +104,46 @@ define i64 @stack_fold_bzhi_u64(i64 %a0,
>  declare i64 @llvm.x86.bmi.bzhi.64(i64, i64)
>
>  define i32 @stack_fold_pdep_u32(i32 %a0, i32 %a1)   {
> -  ;CHECK-LABEL: stack_fold_pdep_u32
> -  ;CHECK:       pdepl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pdep_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    pdepl {{[-0-9]+}}(%r{{[sb]}}p), %eax, %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
>    ret i32 %2
> @@ -36,8 +151,46 @@ define i32 @stack_fold_pdep_u32(i32 %a0,
>  declare i32 @llvm.x86.bmi.pdep.32(i32, i32)
>
>  define i64 @stack_fold_pdep_u64(i64 %a0, i64 %a1)   {
> -  ;CHECK-LABEL: stack_fold_pdep_u64
> -  ;CHECK:       pdepq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pdep_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
> +; CHECK-NEXT:    pdepq {{[-0-9]+}}(%r{{[sb]}}p), %rax, %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
>    ret i64 %2
> @@ -45,8 +198,46 @@ define i64 @stack_fold_pdep_u64(i64 %a0,
>  declare i64 @llvm.x86.bmi.pdep.64(i64, i64)
>
>  define i32 @stack_fold_pext_u32(i32 %a0, i32 %a1)   {
> -  ;CHECK-LABEL: stack_fold_pext_u32
> -  ;CHECK:       pextl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pext_u32:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    pextl {{[-0-9]+}}(%r{{[sb]}}p), %eax, %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
>    ret i32 %2
> @@ -54,8 +245,46 @@ define i32 @stack_fold_pext_u32(i32 %a0,
>  declare i32 @llvm.x86.bmi.pext.32(i32, i32)
>
>  define i64 @stack_fold_pext_u64(i64 %a0, i64 %a1)   {
> -  ;CHECK-LABEL: stack_fold_pext_u64
> -  ;CHECK:       pextq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pext_u64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
> +; CHECK-NEXT:    pextq {{[-0-9]+}}(%r{{[sb]}}p), %rax, %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
>    ret i64 %2
>
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx1.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -verify-machineinstrs -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -9,48 +10,84 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
>  define <2 x double> @stack_fold_addpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addpd
> -  ;CHECK:       vaddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_addpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addpd_ymm
> -  ;CHECK:       vaddpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd <4 x double> %a0, %a1
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_addps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addps
> -  ;CHECK:       vaddps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_addps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addps_ymm
> -  ;CHECK:       vaddps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd <8 x float> %a0, %a1
>    ret <8 x float> %2
>  }
>
>  define double @stack_fold_addsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_addsd
> -  ;CHECK:       vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_addsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addsd_int
> -  ;CHECK:       vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -61,16 +98,28 @@ define <2 x double> @stack_fold_addsd_in
>  declare <2 x double> @llvm.x86.sse2.add.sd(<2 x double>, <2 x double>) nounwind readnone
>
>  define float @stack_fold_addss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_addss
> -  ;CHECK:       vaddss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_addss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addss_int
> -  ;CHECK:       vaddss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -81,8 +130,14 @@ define <4 x float> @stack_fold_addss_int
>  declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_addsubpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addsubpd
> -  ;CHECK:       vaddsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsubpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddsubpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -90,8 +145,14 @@ define <2 x double> @stack_fold_addsubpd
>  declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <4 x double> @stack_fold_addsubpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addsubpd_ymm
> -  ;CHECK:       vaddsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsubpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddsubpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret <4 x double> %2
> @@ -99,8 +160,14 @@ define <4 x double> @stack_fold_addsubpd
>  declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nounwind readnone
>
>  define <4 x float> @stack_fold_addsubps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addsubps
> -  ;CHECK:       vaddsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsubps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddsubps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -108,8 +175,14 @@ define <4 x float> @stack_fold_addsubps(
>  declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <8 x float> @stack_fold_addsubps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addsubps_ymm
> -  ;CHECK:       vaddsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsubps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddsubps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
> @@ -117,8 +190,16 @@ define <8 x float> @stack_fold_addsubps_
>  declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_andnpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnpd
> -  ;CHECK:       vandnpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -131,8 +212,16 @@ define <2 x double> @stack_fold_andnpd(<
>  }
>
>  define <4 x double> @stack_fold_andnpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnpd_ymm
> -  ;CHECK:       vandnpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x double> %a0 to <4 x i64>
>    %3 = bitcast <4 x double> %a1 to <4 x i64>
> @@ -145,8 +234,16 @@ define <4 x double> @stack_fold_andnpd_y
>  }
>
>  define <4 x float> @stack_fold_andnps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnps
> -  ;CHECK:       vandnps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <2 x i64>
>    %3 = bitcast <4 x float> %a1 to <2 x i64>
> @@ -159,8 +256,16 @@ define <4 x float> @stack_fold_andnps(<4
>  }
>
>  define <8 x float> @stack_fold_andnps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnps_ymm
> -  ;CHECK:       vandnps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <8 x float> %a0 to <4 x i64>
>    %3 = bitcast <8 x float> %a1 to <4 x i64>
> @@ -173,8 +278,16 @@ define <8 x float> @stack_fold_andnps_ym
>  }
>
>  define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andpd
> -  ;CHECK:       vandpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -186,8 +299,16 @@ define <2 x double> @stack_fold_andpd(<2
>  }
>
>  define <4 x double> @stack_fold_andpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andpd_ymm
> -  ;CHECK:       vandpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x double> %a0 to <4 x i64>
>    %3 = bitcast <4 x double> %a1 to <4 x i64>
> @@ -199,8 +320,16 @@ define <4 x double> @stack_fold_andpd_ym
>  }
>
>  define <4 x float> @stack_fold_andps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andps
> -  ;CHECK:       vandps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <2 x i64>
>    %3 = bitcast <4 x float> %a1 to <2 x i64>
> @@ -212,8 +341,16 @@ define <4 x float> @stack_fold_andps(<4
>  }
>
>  define <8 x float> @stack_fold_andps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andps_ymm
> -  ;CHECK:       vandps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <8 x float> %a0 to <4 x i64>
>    %3 = bitcast <8 x float> %a1 to <4 x i64>
> @@ -225,8 +362,17 @@ define <8 x float> @stack_fold_andps_ymm
>  }
>
>  define <2 x double> @stack_fold_blendpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_blendpd
> -  ;CHECK:       vblendpd $2, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[1]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = select <2 x i1> <i1 1, i1 0>, <2 x double> %a0, <2 x double> %a1
>    ; fadd forces execution domain
> @@ -235,8 +381,17 @@ define <2 x double> @stack_fold_blendpd(
>  }
>
>  define <4 x double> @stack_fold_blendpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_blendpd_ymm
> -  ;CHECK:       vblendpd $6, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vblendpd $6, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0],mem[1,2],ymm0[3]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = select <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x double> %a0, <4 x double> %a1
>    ; fadd forces execution domain
> @@ -244,8 +399,6 @@ define <4 x double> @stack_fold_blendpd_
>    ret <4 x double> %3}
>
>  define <4 x float> @stack_fold_blendps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_blendps
> -  ;CHECK:       vblendps $6, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = select <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x float> %a0, <4 x float> %a1
>    ; fadd forces execution domain
> @@ -254,8 +407,17 @@ define <4 x float> @stack_fold_blendps(<
>  }
>
>  define <8 x float> @stack_fold_blendps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_blendps_ymm
> -  ;CHECK:       vblendps $102, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vblendps $102, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0],mem[1,2],ymm0[3,4],mem[5,6],ymm0[7]
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = select <8 x i1> <i1 1, i1 0, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1>, <8 x float> %a0, <8 x float> %a1
>    ; fadd forces execution domain
> @@ -264,8 +426,14 @@ define <8 x float> @stack_fold_blendps_y
>  }
>
>  define <2 x double> @stack_fold_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %c) {
> -  ;CHECK-LABEL: stack_fold_blendvpd
> -  ;CHECK:       vblendvpd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendvpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vblendvpd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a1, <2 x double> %c, <2 x double> %a0)
>    ret <2 x double> %2
> @@ -273,8 +441,14 @@ define <2 x double> @stack_fold_blendvpd
>  declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
>
>  define <4 x double> @stack_fold_blendvpd_ymm(<4 x double> %a0, <4 x double> %a1, <4 x double> %c) {
> -  ;CHECK-LABEL: stack_fold_blendvpd_ymm
> -  ;CHECK:       vblendvpd {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendvpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vblendvpd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a1, <4 x double> %c, <4 x double> %a0)
>    ret <4 x double> %2
> @@ -282,8 +456,14 @@ define <4 x double> @stack_fold_blendvpd
>  declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>) nounwind readnone
>
>  define <4 x float> @stack_fold_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %c) {
> -  ;CHECK-LABEL: stack_fold_blendvps
> -  ;CHECK:       vblendvps {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendvps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vblendvps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a1, <4 x float> %c, <4 x float> %a0)
>    ret <4 x float> %2
> @@ -291,8 +471,14 @@ define <4 x float> @stack_fold_blendvps(
>  declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
>
>  define <8 x float> @stack_fold_blendvps_ymm(<8 x float> %a0, <8 x float> %a1, <8 x float> %c) {
> -  ;CHECK-LABEL: stack_fold_blendvps_ymm
> -  ;CHECK:       vblendvps {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendvps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vblendvps %ymm0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a1, <8 x float> %c, <8 x float> %a0)
>    ret <8 x float> %2
> @@ -300,8 +486,14 @@ define <8 x float> @stack_fold_blendvps_
>  declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_cmppd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmppd
> -  ;CHECK:       vcmpeqpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmppd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 0)
>    ret <2 x double> %2
> @@ -309,8 +501,14 @@ define <2 x double> @stack_fold_cmppd(<2
>  declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounwind readnone
>
>  define <4 x double> @stack_fold_cmppd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmppd_ymm
> -  ;CHECK:       vcmpeqpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmppd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i8 0)
>    ret <4 x double> %2
> @@ -318,8 +516,14 @@ define <4 x double> @stack_fold_cmppd_ym
>  declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
>
>  define <4 x float> @stack_fold_cmpps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpps
> -  ;CHECK:       vcmpeqps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 0)
>    ret <4 x float> %2
> @@ -327,8 +531,14 @@ define <4 x float> @stack_fold_cmpps(<4
>  declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
>
>  define <8 x float> @stack_fold_cmpps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpps_ymm
> -  ;CHECK:       vcmpeqps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 0)
>    ret <8 x float> %2
> @@ -336,8 +546,17 @@ define <8 x float> @stack_fold_cmpps_ymm
>  declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
>
>  define i32 @stack_fold_cmpsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpsd
> -  ;CHECK:       vcmpeqsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    vmovq %xmm0, %rax
> +; CHECK-NEXT:    andl $1, %eax
> +; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp oeq double %a0, %a1
>    %3 = zext i1 %2 to i32
> @@ -345,8 +564,14 @@ define i32 @stack_fold_cmpsd(double %a0,
>  }
>
>  define <2 x double> @stack_fold_cmpsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpsd_int
> -  ;CHECK:       vcmpeqsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 0)
>    ret <2 x double> %2
> @@ -354,8 +579,16 @@ define <2 x double> @stack_fold_cmpsd_in
>  declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone
>
>  define i32 @stack_fold_cmpss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpss
> -  ;CHECK:       vcmpeqss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    vmovd %xmm0, %eax
> +; CHECK-NEXT:    andl $1, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp oeq float %a0, %a1
>    %3 = zext i1 %2 to i32
> @@ -363,8 +596,14 @@ define i32 @stack_fold_cmpss(float %a0,
>  }
>
>  define <4 x float> @stack_fold_cmpss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpss_int
> -  ;CHECK:       vcmpeqss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 0)
>    ret <4 x float> %2
> @@ -374,8 +613,18 @@ declare <4 x float> @llvm.x86.sse.cmp.ss
>  ; TODO stack_fold_comisd
>
>  define i32 @stack_fold_comisd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_comisd_int
> -  ;CHECK:       vcomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_comisd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcomisd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setnp %al
> +; CHECK-NEXT:    sete %cl
> +; CHECK-NEXT:    andb %al, %cl
> +; CHECK-NEXT:    movzbl %cl, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
>    ret i32 %2
> @@ -385,8 +634,18 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2
>  ; TODO stack_fold_comiss
>
>  define i32 @stack_fold_comiss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_comiss_int
> -  ;CHECK:       vcomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_comiss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setnp %al
> +; CHECK-NEXT:    sete %cl
> +; CHECK-NEXT:    andb %al, %cl
> +; CHECK-NEXT:    movzbl %cl, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1)
>    ret i32 %2
> @@ -394,16 +653,28 @@ define i32 @stack_fold_comiss_int(<4 x f
>  declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_cvtdq2pd(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2pd
> -  ;CHECK:   vcvtdq2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtdq2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
>    %3 = sitofp <2 x i32> %2 to <2 x double>
>    ret <2 x double> %3
>  }
>  define <2 x double> @stack_fold_cvtdq2pd_int(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2pd_int
> -  ;CHECK:   vcvtdq2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2pd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtdq2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> %a0, <2 x i32> <i32 0, i32 1>
>    %cvt = sitofp <2 x i32> %2 to <2 x double>
> @@ -411,40 +682,70 @@ define <2 x double> @stack_fold_cvtdq2pd
>  }
>
>  define <4 x double> @stack_fold_cvtdq2pd_ymm(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2pd_ymm
> -  ;CHECK:   vcvtdq2pd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2pd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtdq2pd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sitofp <4 x i32> %a0 to <4 x double>
>    ret <4 x double> %2
>  }
>
>  define <4 x double> @stack_fold_cvtdq2pd_ymm_int(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2pd_ymm_int
> -  ;CHECK:   vcvtdq2pd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2pd_ymm_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtdq2pd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %cvt = sitofp <4 x i32> %a0 to <4 x double>
>    ret <4 x double> %cvt
>  }
>
>  define <4 x float> @stack_fold_cvtdq2ps(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2ps
> -  ;CHECK:   vcvtdq2ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtdq2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sitofp <4 x i32> %a0 to <4 x float>
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_cvtdq2ps_ymm(<8 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2ps_ymm
> -  ;CHECK:   vcvtdq2ps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2ps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtdq2ps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sitofp <8 x i32> %a0 to <8 x float>
>    ret <8 x float> %2
>  }
>
>  define <4 x i32> @stack_fold_cvtpd2dq(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtpd2dq
> -  ;CHECK:   vcvtpd2dqx {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtpd2dq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtpd2dqx {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
>    ret <4 x i32> %2
> @@ -452,8 +753,15 @@ define <4 x i32> @stack_fold_cvtpd2dq(<2
>  declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
>
>  define <4 x i32> @stack_fold_cvtpd2dq_ymm(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtpd2dq_ymm
> -  ;CHECK:   vcvtpd2dqy {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtpd2dq_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtpd2dqy {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> %a0)
>    ret <4 x i32> %2
> @@ -461,24 +769,43 @@ define <4 x i32> @stack_fold_cvtpd2dq_ym
>  declare <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double>) nounwind readnone
>
>  define <2 x float> @stack_fold_cvtpd2ps(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtpd2ps
> -  ;CHECK:   vcvtpd2psx {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtpd2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtpd2psx {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptrunc <2 x double> %a0 to <2 x float>
>    ret <2 x float> %2
>  }
>
>  define <4 x float> @stack_fold_cvtpd2ps_ymm(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtpd2ps_ymm
> -  ;CHECK:   vcvtpd2psy {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtpd2ps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtpd2psy {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptrunc <4 x double> %a0 to <4 x float>
>    ret <4 x float> %2
>  }
>
>  define <4 x float> @stack_fold_cvtph2ps(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtph2ps
> -  ;CHECK:   vcvtph2ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtph2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0)
>    ret <4 x float> %2
> @@ -486,8 +813,14 @@ define <4 x float> @stack_fold_cvtph2ps(
>  declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>) nounwind readonly
>
>  define <8 x float> @stack_fold_cvtph2ps_ymm(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtph2ps_ymm
> -  ;CHECK:   vcvtph2ps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtph2ps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0)
>    ret <8 x float> %2
> @@ -495,8 +828,14 @@ define <8 x float> @stack_fold_cvtph2ps_
>  declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readonly
>
>  define <4 x i32> @stack_fold_cvtps2dq(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2dq
> -  ;CHECK:  vcvtps2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtps2dq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtps2dq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
>    ret <4 x i32> %2
> @@ -504,8 +843,14 @@ define <4 x i32> @stack_fold_cvtps2dq(<4
>  declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
>
>  define <8 x i32> @stack_fold_cvtps2dq_ymm(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2dq_ymm
> -  ;CHECK:  vcvtps2dq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtps2dq_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtps2dq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> %a0)
>    ret <8 x i32> %2
> @@ -513,8 +858,14 @@ define <8 x i32> @stack_fold_cvtps2dq_ym
>  declare <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_cvtps2pd(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2pd
> -  ;CHECK:   vcvtps2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtps2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtps2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <2 x i32> <i32 0, i32 1>
>    %3 = fpext <2 x float> %2 to <2 x double>
> @@ -522,8 +873,14 @@ define <2 x double> @stack_fold_cvtps2pd
>  }
>
>  define <2 x double> @stack_fold_cvtps2pd_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2pd_int
> -  ;CHECK:   vcvtps2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtps2pd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtps2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a0, <2 x i32> <i32 0, i32 1>
>    %cvtps2pd = fpext <2 x float> %2 to <2 x double>
> @@ -531,24 +888,43 @@ define <2 x double> @stack_fold_cvtps2pd
>  }
>
>  define <4 x double> @stack_fold_cvtps2pd_ymm(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2pd_ymm
> -  ;CHECK:   vcvtps2pd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtps2pd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtps2pd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fpext <4 x float> %a0 to <4 x double>
>    ret <4 x double> %2
>  }
>
>  define <4 x double> @stack_fold_cvtps2pd_ymm_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2pd_ymm_int
> -  ;CHECK:   vcvtps2pd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtps2pd_ymm_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtps2pd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %cvtps2pd = fpext <4 x float> %a0 to <4 x double>
>    ret <4 x double> %cvtps2pd
>  }
>
>  define <8 x i16> @stack_fold_cvtps2ph_ymm(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2ph_ymm
> -  ;CHECK:   vcvtps2ph $0, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
> +; CHECK-LABEL: stack_fold_cvtps2ph_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vcvtps2ph $0, %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0)
>    %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    ret <8 x i16> %1
> @@ -558,8 +934,14 @@ declare <8 x i16> @llvm.x86.vcvtps2ph.25
>  ; TODO stack_fold_cvtsd2si
>
>  define i32 @stack_fold_cvtsd2si_int(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsd2si_int
> -  ;CHECK:  vcvtsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsd2si_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsd2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0)
>    ret i32 %2
> @@ -569,8 +951,14 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x
>  ; TODO stack_fold_cvtsd2si64
>
>  define i64 @stack_fold_cvtsd2si64_int(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsd2si64_int
> -  ;CHECK:  vcvtsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsd2si64_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsd2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0)
>    ret i64 %2
> @@ -578,16 +966,89 @@ define i64 @stack_fold_cvtsd2si64_int(<2
>  declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
>
>  define double @stack_fold_cvtsi2sd(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi2sd
> -  ;CHECK:  vcvtsi2sdl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi2sd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsi2sdl {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i32 %a0 to double
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_cvtsi2sd_int(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi2sd_int
> -  ;CHECK:  vcvtsi2sdl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi2sd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsi2sdl {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i32 %a0 to double
>    %3 = insertelement <2 x double> zeroinitializer, double %2, i64 0
> @@ -595,16 +1056,89 @@ define <2 x double> @stack_fold_cvtsi2sd
>  }
>
>  define double @stack_fold_cvtsi642sd(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi642sd
> -  ;CHECK:  vcvtsi2sdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi642sd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsi2sdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i64 %a0 to double
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_cvtsi642sd_int(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi642sd_int
> -  ;CHECK:  vcvtsi2sdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi642sd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsi2sdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i64 %a0 to double
>    %3 = insertelement <2 x double> zeroinitializer, double %2, i64 0
> @@ -612,16 +1146,90 @@ define <2 x double> @stack_fold_cvtsi642
>  }
>
>  define float @stack_fold_cvtsi2ss(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi2ss
> -  ;CHECK:  vcvtsi2ssl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi2ss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsi2ssl {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i32 %a0 to float
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_cvtsi2ss_int(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi2ss_int
> -  ;CHECK:  vcvtsi2ssl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi2ss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsi2ssl {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i32 %a0 to float
>    %3 = insertelement <4 x float> zeroinitializer, float %2, i64 0
> @@ -629,16 +1237,90 @@ define <4 x float> @stack_fold_cvtsi2ss_
>  }
>
>  define float @stack_fold_cvtsi642ss(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi642ss
> -  ;CHECK:  vcvtsi2ssq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi642ss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsi2ssq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i64 %a0 to float
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_cvtsi642ss_int(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi642ss_int
> -  ;CHECK:  vcvtsi2ssq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi642ss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtsi2ssq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i64 %a0 to float
>    %3 = insertelement <4 x float> zeroinitializer, float %2, i64 0
> @@ -648,8 +1330,14 @@ define <4 x float> @stack_fold_cvtsi642s
>  ; TODO stack_fold_cvtss2si
>
>  define i32 @stack_fold_cvtss2si_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtss2si_int
> -  ;CHECK:  vcvtss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtss2si_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtss2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0)
>    ret i32 %2
> @@ -659,8 +1347,14 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x
>  ; TODO stack_fold_cvtss2si64
>
>  define i64 @stack_fold_cvtss2si64_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtss2si64_int
> -  ;CHECK:  vcvtss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtss2si64_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtss2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0)
>    ret i64 %2
> @@ -668,8 +1362,14 @@ define i64 @stack_fold_cvtss2si64_int(<4
>  declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
>
>  define <4 x i32> @stack_fold_cvttpd2dq(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttpd2dq
> -  ;CHECK:  vcvttpd2dqx {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttpd2dq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttpd2dqx {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
>    ret <4 x i32> %2
> @@ -677,40 +1377,71 @@ define <4 x i32> @stack_fold_cvttpd2dq(<
>  declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
>
>  define <4 x i32> @stack_fold_cvttpd2dq_ymm(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttpd2dq_ymm
> -  ;CHECK:  vcvttpd2dqy {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttpd2dq_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttpd2dqy {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi <4 x double> %a0 to <4 x i32>
>    ret <4 x i32> %2
>  }
>
>  define <4 x i32> @stack_fold_cvttps2dq(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttps2dq
> -  ;CHECK:  vcvttps2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttps2dq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttps2dq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi <4 x float> %a0 to <4 x i32>
>    ret <4 x i32> %2
>  }
>
>  define <8 x i32> @stack_fold_cvttps2dq_ymm(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttps2dq_ymm
> -  ;CHECK:  vcvttps2dq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttps2dq_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttps2dq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi <8 x float> %a0 to <8 x i32>
>    ret <8 x i32> %2
>  }
>
>  define i32 @stack_fold_cvttsd2si(double %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttsd2si
> -  ;CHECK:  vcvttsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttsd2si:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttsd2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi double %a0 to i32
>    ret i32 %2
>  }
>
>  define i32 @stack_fold_cvttsd2si_int(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttsd2si_int
> -  ;CHECK:  vcvttsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttsd2si_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttsd2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0)
>    ret i32 %2
> @@ -718,16 +1449,28 @@ define i32 @stack_fold_cvttsd2si_int(<2
>  declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
>
>  define i64 @stack_fold_cvttsd2si64(double %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttsd2si64
> -  ;CHECK:  vcvttsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttsd2si64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttsd2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi double %a0 to i64
>    ret i64 %2
>  }
>
>  define i64 @stack_fold_cvttsd2si64_int(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttsd2si64_int
> -  ;CHECK:  vcvttsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttsd2si64_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttsd2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0)
>    ret i64 %2
> @@ -735,16 +1478,28 @@ define i64 @stack_fold_cvttsd2si64_int(<
>  declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
>
>  define i32 @stack_fold_cvttss2si(float %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttss2si
> -  ;CHECK:  vcvttss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttss2si:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttss2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi float %a0 to i32
>    ret i32 %2
>  }
>
>  define i32 @stack_fold_cvttss2si_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttss2si_int
> -  ;CHECK:  vcvttss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttss2si_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttss2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0)
>    ret i32 %2
> @@ -752,16 +1507,28 @@ define i32 @stack_fold_cvttss2si_int(<4
>  declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
>
>  define i64 @stack_fold_cvttss2si64(float %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttss2si64
> -  ;CHECK:  vcvttss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttss2si64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttss2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi float %a0 to i64
>    ret i64 %2
>  }
>
>  define i64 @stack_fold_cvttss2si64_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttss2si64_int
> -  ;CHECK:  vcvttss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttss2si64_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvttss2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0)
>    ret i64 %2
> @@ -769,48 +1536,84 @@ define i64 @stack_fold_cvttss2si64_int(<
>  declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_divpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_divpd
> -  ;CHECK:       vdivpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_divpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_divpd_ymm
> -  ;CHECK:       vdivpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv <4 x double> %a0, %a1
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_divps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_divps
> -  ;CHECK:       vdivps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_divps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_divps_ymm
> -  ;CHECK:       vdivps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv <8 x float> %a0, %a1
>    ret <8 x float> %2
>  }
>
>  define double @stack_fold_divsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_divsd
> -  ;CHECK:       vdivsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_divsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_divsd_int
> -  ;CHECK:       vdivsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -820,16 +1623,28 @@ define <2 x double> @stack_fold_divsd_in
>  }
>
>  define float @stack_fold_divss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_divss
> -  ;CHECK:       vdivss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_divss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_divss_int
> -  ;CHECK:       vdivss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -839,8 +1654,14 @@ define <4 x float> @stack_fold_divss_int
>  }
>
>  define <2 x double> @stack_fold_dppd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_dppd
> -  ;CHECK:       vdppd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_dppd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdppd $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
>    ret <2 x double> %2
> @@ -848,8 +1669,14 @@ define <2 x double> @stack_fold_dppd(<2
>  declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwind readnone
>
>  define <4 x float> @stack_fold_dpps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_dpps
> -  ;CHECK:       vdpps $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_dpps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdpps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
>    ret <4 x float> %2
> @@ -857,8 +1684,14 @@ define <4 x float> @stack_fold_dpps(<4 x
>  declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind readnone
>
>  define <8 x float> @stack_fold_dpps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_dpps_ymm
> -  ;CHECK:       vdpps $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_dpps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdpps $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7)
>    ret <8 x float> %2
> @@ -866,17 +1699,60 @@ define <8 x float> @stack_fold_dpps_ymm(
>  declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
>
>  define <4 x float> @stack_fold_extractf128(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_extractf128
> -  ;CHECK:       vextractf128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
> +; CHECK-LABEL: stack_fold_extractf128:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vextractf128 $1, %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
>    %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    ret <4 x float> %1
>  }
>
>  define i32 @stack_fold_extractps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_extractps
> -  ;CHECK:       vextractps $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
> -  ;CHECK:       movl    {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
> +; CHECK-LABEL: stack_fold_extractps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    vextractps $1, %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    ; fadd forces execution domain
>    %1 = fadd <4 x float> %a0, %a1
>    %2 = extractelement <4 x float> %1, i32 1
> @@ -886,8 +1762,14 @@ define i32 @stack_fold_extractps(<4 x fl
>  }
>
>  define <2 x double> @stack_fold_haddpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_haddpd
> -  ;CHECK:       vhaddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_haddpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vhaddpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -895,8 +1777,14 @@ define <2 x double> @stack_fold_haddpd(<
>  declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <4 x double> @stack_fold_haddpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_haddpd_ymm
> -  ;CHECK:       vhaddpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_haddpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vhaddpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret <4 x double> %2
> @@ -904,8 +1792,14 @@ define <4 x double> @stack_fold_haddpd_y
>  declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounwind readnone
>
>  define <4 x float> @stack_fold_haddps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_haddps
> -  ;CHECK:       vhaddps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_haddps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vhaddps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -913,8 +1807,14 @@ define <4 x float> @stack_fold_haddps(<4
>  declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <8 x float> @stack_fold_haddps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_haddps_ymm
> -  ;CHECK:       vhaddps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_haddps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vhaddps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
> @@ -922,8 +1822,14 @@ define <8 x float> @stack_fold_haddps_ym
>  declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_hsubpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_hsubpd
> -  ;CHECK:       vhsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_hsubpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vhsubpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -931,8 +1837,14 @@ define <2 x double> @stack_fold_hsubpd(<
>  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <4 x double> @stack_fold_hsubpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_hsubpd_ymm
> -  ;CHECK:       vhsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_hsubpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vhsubpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret <4 x double> %2
> @@ -940,8 +1852,14 @@ define <4 x double> @stack_fold_hsubpd_y
>  declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounwind readnone
>
>  define <4 x float> @stack_fold_hsubps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_hsubps
> -  ;CHECK:       vhsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_hsubps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vhsubps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -949,8 +1867,14 @@ define <4 x float> @stack_fold_hsubps(<4
>  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <8 x float> @stack_fold_hsubps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_hsubps_ymm
> -  ;CHECK:       vhsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_hsubps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vhsubps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
> @@ -958,17 +1882,30 @@ define <8 x float> @stack_fold_hsubps_ym
>  declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>) nounwind readnone
>
>  define <8 x float> @stack_fold_insertf128(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_insertf128
> -  ;CHECK:       vinsertf128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_insertf128:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
>    ret <8 x float> %2
>  }
>
>  define <4 x float> @stack_fold_insertps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_insertps
> -  ;CHECK:       vinsertps $17, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> -  ;CHECK-NEXT:                                                                              {{.*#+}} xmm0 = zero,mem[0],xmm0[2,3]
> +; CHECK-LABEL: stack_fold_insertps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vinsertps $17, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = zero,mem[0],xmm0[2,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 209)
>    ret <4 x float> %2
> @@ -976,8 +1913,14 @@ define <4 x float> @stack_fold_insertps(
>  declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone
>
>  define <2 x double> @stack_fold_maxpd(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxpd
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -985,16 +1928,28 @@ define <2 x double> @stack_fold_maxpd(<2
>  declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <2 x double> @stack_fold_maxpd_commutable(<2 x double> %a0, <2 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxpd_commutable
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_maxpd_ymm(<4 x double> %a0, <4 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxpd_ymm
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret <4 x double> %2
> @@ -1002,16 +1957,28 @@ define <4 x double> @stack_fold_maxpd_ym
>  declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwind readnone
>
>  define <4 x double> @stack_fold_maxpd_ymm_commutable(<4 x double> %a0, <4 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxpd_ymm_commutable
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_ymm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_maxps(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxps
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -1019,16 +1986,28 @@ define <4 x float> @stack_fold_maxps(<4
>  declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <4 x float> @stack_fold_maxps_commutable(<4 x float> %a0, <4 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxps_commutable
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_maxps_ymm(<8 x float> %a0, <8 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxps_ymm
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
> @@ -1036,16 +2015,28 @@ define <8 x float> @stack_fold_maxps_ymm
>  declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind readnone
>
>  define <8 x float> @stack_fold_maxps_ymm_commutable(<8 x float> %a0, <8 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxps_ymm_commutable
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_ymm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
>  }
>
>  define double @stack_fold_maxsd(double %a0, double %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxsd
> -  ;CHECK:       vmaxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ogt double %a0, %a1
>    %3 = select i1 %2, double %a0, double %a1
> @@ -1053,8 +2044,14 @@ define double @stack_fold_maxsd(double %
>  }
>
>  define double @stack_fold_maxsd_commutable(double %a0, double %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxsd_commutable
> -  ;CHECK:       vmaxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxsd_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ogt double %a0, %a1
>    %3 = select i1 %2, double %a0, double %a1
> @@ -1062,8 +2059,14 @@ define double @stack_fold_maxsd_commutab
>  }
>
>  define <2 x double> @stack_fold_maxsd_int(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxsd_int
> -  ;CHECK:       vmaxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -1071,8 +2074,14 @@ define <2 x double> @stack_fold_maxsd_in
>  declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
>
>  define float @stack_fold_maxss(float %a0, float %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxss
> -  ;CHECK:       vmaxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ogt float %a0, %a1
>    %3 = select i1 %2, float %a0, float %a1
> @@ -1080,8 +2089,14 @@ define float @stack_fold_maxss(float %a0
>  }
>
>  define float @stack_fold_maxss_commutable(float %a0, float %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxss_commutable
> -  ;CHECK:       vmaxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxss_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ogt float %a0, %a1
>    %3 = select i1 %2, float %a0, float %a1
> @@ -1089,8 +2104,14 @@ define float @stack_fold_maxss_commutabl
>  }
>
>  define <4 x float> @stack_fold_maxss_int(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxss_int
> -  ;CHECK:       vmaxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -1098,8 +2119,14 @@ define <4 x float> @stack_fold_maxss_int
>  declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_minpd(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minpd
> -  ;CHECK:       vminpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -1107,16 +2134,28 @@ define <2 x double> @stack_fold_minpd(<2
>  declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <2 x double> @stack_fold_minpd_commutable(<2 x double> %a0, <2 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minpd_commutable
> -  ;CHECK:       vminpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_minpd_ymm(<4 x double> %a0, <4 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minpd_ymm
> -  ;CHECK:       vminpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret <4 x double> %2
> @@ -1124,16 +2163,28 @@ define <4 x double> @stack_fold_minpd_ym
>  declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwind readnone
>
>  define <4 x double> @stack_fold_minpd_ymm_commutable(<4 x double> %a0, <4 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minpd_ymm_commutable
> -  ;CHECK:       vminpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd_ymm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_minps(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minps
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -1141,16 +2192,28 @@ define <4 x float> @stack_fold_minps(<4
>  declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <4 x float> @stack_fold_minps_commutable(<4 x float> %a0, <4 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minps_commutable
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_minps_ymm(<8 x float> %a0, <8 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minps_ymm
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
> @@ -1158,16 +2221,28 @@ define <8 x float> @stack_fold_minps_ymm
>  declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind readnone
>
>  define <8 x float> @stack_fold_minps_ymm_commutable(<8 x float> %a0, <8 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minps_ymm_commutable
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_ymm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
>  }
>
>  define double @stack_fold_minsd(double %a0, double %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minsd
> -  ;CHECK:       vminsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp olt double %a0, %a1
>    %3 = select i1 %2, double %a0, double %a1
> @@ -1175,8 +2250,14 @@ define double @stack_fold_minsd(double %
>  }
>
>  define double @stack_fold_minsd_commutable(double %a0, double %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minsd_commutable
> -  ;CHECK:       vminsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minsd_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp olt double %a0, %a1
>    %3 = select i1 %2, double %a0, double %a1
> @@ -1184,8 +2265,14 @@ define double @stack_fold_minsd_commutab
>  }
>
>  define <2 x double> @stack_fold_minsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_minsd_int
> -  ;CHECK:       vminsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -1193,8 +2280,14 @@ define <2 x double> @stack_fold_minsd_in
>  declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
>
>  define float @stack_fold_minss(float %a0, float %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minss
> -  ;CHECK:       vminss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp olt float %a0, %a1
>    %3 = select i1 %2, float %a0, float %a1
> @@ -1202,8 +2295,14 @@ define float @stack_fold_minss(float %a0
>  }
>
>  define float @stack_fold_minss_commutable(float %a0, float %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minss_commutable
> -  ;CHECK:       vminss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minss_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp olt float %a0, %a1
>    %3 = select i1 %2, float %a0, float %a1
> @@ -1211,8 +2310,14 @@ define float @stack_fold_minss_commutabl
>  }
>
>  define <4 x float> @stack_fold_minss_int(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minss_int
> -  ;CHECK:       vminss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -1220,16 +2325,30 @@ define <4 x float> @stack_fold_minss_int
>  declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_movddup(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_movddup
> -  ;CHECK:   vmovddup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movddup:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovddup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[0,0]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 0, i32 0>
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_movddup_ymm(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_movddup_ymm
> -  ;CHECK:   vmovddup {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movddup_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovddup {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0,0,2,2]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
>    ret <4 x double> %2
> @@ -1242,80 +2361,144 @@ define <4 x double> @stack_fold_movddup_
>  ; TODO stack_fold_movlps (load / store)
>
>  define <4 x float> @stack_fold_movshdup(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_movshdup
> -  ;CHECK:   vmovshdup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movshdup:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_movshdup_ymm(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_movshdup_ymm
> -  ;CHECK:   vmovshdup {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movshdup_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[1,1,3,3,5,5,7,7]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
>    ret <8 x float> %2
>  }
>
>  define <4 x float> @stack_fold_movsldup(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_movsldup
> -  ;CHECK:   vmovsldup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movsldup:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovsldup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[0,0,2,2]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_movsldup_ymm(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_movsldup_ymm
> -  ;CHECK:   vmovsldup {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movsldup_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovsldup {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0,0,2,2,4,4,6,6]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
>    ret <8 x float> %2
>  }
>
>  define <2 x double> @stack_fold_mulpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulpd
> -  ;CHECK:       vmulpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_mulpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulpd_ymm
> -  ;CHECK:       vmulpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul <4 x double> %a0, %a1
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_mulps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulps
> -  ;CHECK:       vmulps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_mulps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulps_ymm
> -  ;CHECK:       vmulps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul <8 x float> %a0, %a1
>    ret <8 x float> %2
>  }
>
>  define double @stack_fold_mulsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_mulsd
> -  ;CHECK:       vmulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_mulsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulsd_int
> -  ;CHECK:       vmulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -1325,16 +2508,28 @@ define <2 x double> @stack_fold_mulsd_in
>  }
>
>  define float @stack_fold_mulss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_mulss
> -  ;CHECK:       vmulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_mulss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulss_int
> -  ;CHECK:       vmulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -1344,8 +2539,16 @@ define <4 x float> @stack_fold_mulss_int
>  }
>
>  define <2 x double> @stack_fold_orpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_orpd
> -  ;CHECK:       vorpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -1357,8 +2560,16 @@ define <2 x double> @stack_fold_orpd(<2
>  }
>
>  define <4 x double> @stack_fold_orpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_orpd_ymm
> -  ;CHECK:       vorpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x double> %a0 to <4 x i64>
>    %3 = bitcast <4 x double> %a1 to <4 x i64>
> @@ -1370,8 +2581,16 @@ define <4 x double> @stack_fold_orpd_ymm
>  }
>
>  define <4 x float> @stack_fold_orps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_orps
> -  ;CHECK:       vorps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <2 x i64>
>    %3 = bitcast <4 x float> %a1 to <2 x i64>
> @@ -1383,8 +2602,16 @@ define <4 x float> @stack_fold_orps(<4 x
>  }
>
>  define <8 x float> @stack_fold_orps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_orps_ymm
> -  ;CHECK:       vorps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <8 x float> %a0 to <4 x i64>
>    %3 = bitcast <8 x float> %a1 to <4 x i64>
> @@ -1396,32 +2623,59 @@ define <8 x float> @stack_fold_orps_ymm(
>  }
>
>  define <8 x float> @stack_fold_perm2f128(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_perm2f128
> -  ;CHECK:   vperm2f128 $33, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_perm2f128:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vperm2f128 $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[2,3],mem[0,1]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
>    ret <8 x float> %2
>  }
>
>  define <2 x double> @stack_fold_permilpd(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilpd
> -  ;CHECK:   vpermilpd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[1,0]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 1, i32 0>
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_permilpd_ymm(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilpd_ymm
> -  ;CHECK:   vpermilpd $5, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[1,0,3,2]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
>    ret <4 x double> %2
>  }
>
>  define <2 x double> @stack_fold_permilpdvar(<2 x double> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpdvar
> -  ;CHECK:       vpermilpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpdvar:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1)
>    ret <2 x double> %2
> @@ -1429,8 +2683,14 @@ define <2 x double> @stack_fold_permilpd
>  declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwind readnone
>
>  define <4 x double> @stack_fold_permilpdvar_ymm(<4 x double> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpdvar_ymm
> -  ;CHECK:       vpermilpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpdvar_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
>    ret <4 x double> %2
> @@ -1438,24 +2698,44 @@ define <4 x double> @stack_fold_permilpd
>  declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>) nounwind readnone
>
>  define <4 x float> @stack_fold_permilps(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilps
> -  ;CHECK:   vpermilps $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps $27, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[3,2,1,0]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_permilps_ymm(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilps_ymm
> -  ;CHECK:   vpermilps $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps $27, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[3,2,1,0,7,6,5,4]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
>    ret <8 x float> %2
>  }
>
>  define <4 x float> @stack_fold_permilpsvar(<4 x float> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpsvar
> -  ;CHECK:       vpermilps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpsvar:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1)
>    ret <4 x float> %2
> @@ -1463,8 +2743,14 @@ define <4 x float> @stack_fold_permilpsv
>  declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind readnone
>
>  define <8 x float> @stack_fold_permilpsvar_ymm(<8 x float> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpsvar_ymm
> -  ;CHECK:       vpermilps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpsvar_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1)
>    ret <8 x float> %2
> @@ -1474,8 +2760,14 @@ declare <8 x float> @llvm.x86.avx.vpermi
>  ; TODO stack_fold_rcpps
>
>  define <4 x float> @stack_fold_rcpps_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_rcpps_int
> -  ;CHECK:       vrcpps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_rcpps_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vrcpps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
>    ret <4 x float> %2
> @@ -1485,8 +2777,14 @@ declare <4 x float> @llvm.x86.sse.rcp.ps
>  ; TODO stack_fold_rcpps_ymm
>
>  define <8 x float> @stack_fold_rcpps_ymm_int(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_rcpps_ymm_int
> -  ;CHECK:       vrcpps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_rcpps_ymm_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vrcpps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0)
>    ret <8 x float> %2
> @@ -1497,8 +2795,14 @@ declare <8 x float> @llvm.x86.avx.rcp.ps
>  ; TODO stack_fold_rcpss_int
>
>  define <2 x double> @stack_fold_roundpd(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_roundpd
> -  ;CHECK:  vroundpd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vroundpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7)
>    ret <2 x double> %2
> @@ -1506,8 +2810,14 @@ define <2 x double> @stack_fold_roundpd(
>  declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone
>
>  define <4 x double> @stack_fold_roundpd_ymm(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_roundpd_ymm
> -  ;CHECK:  vroundpd $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vroundpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7)
>    ret <4 x double> %2
> @@ -1515,8 +2825,14 @@ define <4 x double> @stack_fold_roundpd_
>  declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind readnone
>
>  define <4 x float> @stack_fold_roundps(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_roundps
> -  ;CHECK:  vroundps $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vroundps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7)
>    ret <4 x float> %2
> @@ -1524,8 +2840,14 @@ define <4 x float> @stack_fold_roundps(<
>  declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
>
>  define <8 x float> @stack_fold_roundps_ymm(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_roundps_ymm
> -  ;CHECK:  vroundps $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vroundps $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7)
>    ret <8 x float> %2
> @@ -1533,8 +2855,15 @@ define <8 x float> @stack_fold_roundps_y
>  declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readnone
>
>  define double @stack_fold_roundsd(double %a0) optsize {
> -  ;CHECK-LABEL: stack_fold_roundsd
> -  ;CHECK:       vroundsd $9, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
> +; CHECK-NEXT:    vroundsd $9, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call double @llvm.floor.f64(double %a0)
>    ret double %2
> @@ -1542,8 +2871,16 @@ define double @stack_fold_roundsd(double
>  declare double @llvm.floor.f64(double) nounwind readnone
>
>  define <2 x double> @stack_fold_roundsd_int(<2 x double> %a0, <2 x double> %a1) optsize {
> -  ;CHECK-LABEL: stack_fold_roundsd_int
> -  ;CHECK:       vroundsd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    vroundsd $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7)
>    ret <2 x double> %2
> @@ -1551,8 +2888,15 @@ define <2 x double> @stack_fold_roundsd_
>  declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone
>
>  define float @stack_fold_roundss(float %a0) optsize {
> -  ;CHECK-LABEL: stack_fold_roundss
> -  ;CHECK:       vroundss $9, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
> +; CHECK-NEXT:    vroundss $9, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call float @llvm.floor.f32(float %a0)
>    ret float %2
> @@ -1560,8 +2904,16 @@ define float @stack_fold_roundss(float %
>  declare float @llvm.floor.f32(float) nounwind readnone
>
>  define <4 x float> @stack_fold_roundss_int(<4 x float> %a0, <4 x float> %a1) optsize {
> -  ;CHECK-LABEL: stack_fold_roundss_int
> -  ;CHECK:       vroundss $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    vroundss $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7)
>    ret <4 x float> %2
> @@ -1571,8 +2923,14 @@ declare <4 x float> @llvm.x86.sse41.roun
>  ; TODO stack_fold_rsqrtps
>
>  define <4 x float> @stack_fold_rsqrtps_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_rsqrtps_int
> -  ;CHECK:       vrsqrtps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_rsqrtps_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vrsqrtps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
>    ret <4 x float> %2
> @@ -1582,8 +2940,14 @@ declare <4 x float> @llvm.x86.sse.rsqrt.
>  ; TODO stack_fold_rsqrtps_ymm
>
>  define <8 x float> @stack_fold_rsqrtps_ymm_int(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_rsqrtps_ymm_int
> -  ;CHECK:       vrsqrtps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_rsqrtps_ymm_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vrsqrtps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0)
>    ret <8 x float> %2
> @@ -1594,8 +2958,17 @@ declare <8 x float> @llvm.x86.avx.rsqrt.
>  ; TODO stack_fold_rsqrtss_int
>
>  define <2 x double> @stack_fold_shufpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_shufpd
> -  ;CHECK:       vshufpd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vshufpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[1],mem[0]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
>    ; fadd forces execution domain
> @@ -1604,8 +2977,17 @@ define <2 x double> @stack_fold_shufpd(<
>  }
>
>  define <4 x double> @stack_fold_shufpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_shufpd_ymm
> -  ;CHECK:       vshufpd $5, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vshufpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[1],mem[0],ymm0[3],mem[2]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 4, i32 3, i32 6>
>    ; fadd forces execution domain
> @@ -1614,24 +2996,44 @@ define <4 x double> @stack_fold_shufpd_y
>  }
>
>  define <4 x float> @stack_fold_shufps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_shufps
> -  ;CHECK:       vshufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vshufps $200, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0,2],mem[0,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_shufps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_shufps_ymm
> -  ;CHECK:       vshufps $148, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vshufps $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0,1],mem[1,2],ymm0[4,5],mem[5,6]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 9, i32 10, i32 4, i32 5, i32 13, i32 14>
>    ret <8 x float> %2
>  }
>
>  define <2 x double> @stack_fold_sqrtpd(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_sqrtpd
> -  ;CHECK:       vsqrtpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsqrtpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a0)
>    ret <2 x double> %2
> @@ -1639,8 +3041,14 @@ define <2 x double> @stack_fold_sqrtpd(<
>  declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)
>
>  define <4 x double> @stack_fold_sqrtpd_ymm(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_sqrtpd_ymm
> -  ;CHECK:       vsqrtpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsqrtpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %a0)
>    ret <4 x double> %2
> @@ -1648,8 +3056,14 @@ define <4 x double> @stack_fold_sqrtpd_y
>  declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
>
>  define <4 x float> @stack_fold_sqrtps(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_sqrtps
> -  ;CHECK:       vsqrtps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsqrtps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a0)
>    ret <4 x float> %2
> @@ -1657,8 +3071,14 @@ define <4 x float> @stack_fold_sqrtps(<4
>  declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
>
>  define <8 x float> @stack_fold_sqrtps_ymm(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_sqrtps_ymm
> -  ;CHECK:       vsqrtps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsqrtps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.sqrt.v8f32(<8 x float> %a0)
>    ret <8 x float> %2
> @@ -1666,8 +3086,15 @@ define <8 x float> @stack_fold_sqrtps_ym
>  declare <8 x float> @llvm.sqrt.v8f32(<8 x float>)
>
>  define double @stack_fold_sqrtsd(double %a0) optsize {
> -  ;CHECK-LABEL: stack_fold_sqrtsd
> -  ;CHECK:       vsqrtsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
> +; CHECK-NEXT:    vsqrtsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call double @llvm.sqrt.f64(double %a0)
>    ret double %2
> @@ -1677,8 +3104,15 @@ declare double @llvm.sqrt.f64(double) no
>  ; TODO stack_fold_sqrtsd_int
>
>  define float @stack_fold_sqrtss(float %a0) optsize {
> -  ;CHECK-LABEL: stack_fold_sqrtss
> -  ;CHECK:       vsqrtss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
> +; CHECK-NEXT:    vsqrtss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call float @llvm.sqrt.f32(float %a0)
>    ret float %2
> @@ -1688,48 +3122,84 @@ declare float @llvm.sqrt.f32(float) noun
>  ; TODO stack_fold_sqrtss_int
>
>  define <2 x double> @stack_fold_subpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_subpd
> -  ;CHECK:       vsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_subpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_subpd_ymm
> -  ;CHECK:       vsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub <4 x double> %a0, %a1
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_subps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_subps
> -  ;CHECK:       vsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_subps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_subps_ymm
> -  ;CHECK:       vsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub <8 x float> %a0, %a1
>    ret <8 x float> %2
>  }
>
>  define double @stack_fold_subsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_subsd
> -  ;CHECK:       vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_subsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_subsd_int
> -  ;CHECK:       vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -1739,16 +3209,28 @@ define <2 x double> @stack_fold_subsd_in
>  }
>
>  define float @stack_fold_subss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_subss
> -  ;CHECK:       vsubss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_subss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_subss_int
> -  ;CHECK:       vsubss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -1758,8 +3240,16 @@ define <4 x float> @stack_fold_subss_int
>  }
>
>  define i32 @stack_fold_testpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_testpd
> -  ;CHECK:       vtestpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_testpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    vtestpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1)
>    ret i32 %2
> @@ -1767,8 +3257,17 @@ define i32 @stack_fold_testpd(<2 x doubl
>  declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define i32 @stack_fold_testpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_testpd_ymm
> -  ;CHECK:       vtestpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_testpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    vtestpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret i32 %2
> @@ -1776,8 +3275,16 @@ define i32 @stack_fold_testpd_ymm(<4 x d
>  declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind readnone
>
>  define i32 @stack_fold_testps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_testps
> -  ;CHECK:       vtestps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_testps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    vtestps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1)
>    ret i32 %2
> @@ -1785,8 +3292,17 @@ define i32 @stack_fold_testps(<4 x float
>  declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define i32 @stack_fold_testps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_testps_ymm
> -  ;CHECK:       vtestps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_testps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    vtestps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret i32 %2
> @@ -1794,8 +3310,17 @@ define i32 @stack_fold_testps_ymm(<8 x f
>  declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readnone
>
>  define i32 @stack_fold_ucomisd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_ucomisd
> -  ;CHECK:       vucomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ucomisd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    vucomisd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    sete %al
> +; CHECK-NEXT:    leal -1(%rax,%rax), %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ueq double %a0, %a1
>    %3 = select i1 %2, i32 1, i32 -1
> @@ -1803,8 +3328,18 @@ define i32 @stack_fold_ucomisd(double %a
>  }
>
>  define i32 @stack_fold_ucomisd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_ucomisd_int
> -  ;CHECK:       vucomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ucomisd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vucomisd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setnp %al
> +; CHECK-NEXT:    sete %cl
> +; CHECK-NEXT:    andb %al, %cl
> +; CHECK-NEXT:    movzbl %cl, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
>    ret i32 %2
> @@ -1812,8 +3347,17 @@ define i32 @stack_fold_ucomisd_int(<2 x
>  declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readnone
>
>  define i32 @stack_fold_ucomiss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_ucomiss
> -  ;CHECK:       vucomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ucomiss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    vucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    sete %al
> +; CHECK-NEXT:    leal -1(%rax,%rax), %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ueq float %a0, %a1
>    %3 = select i1 %2, i32 1, i32 -1
> @@ -1821,8 +3365,18 @@ define i32 @stack_fold_ucomiss(float %a0
>  }
>
>  define i32 @stack_fold_ucomiss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_ucomiss_int
> -  ;CHECK:       vucomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ucomiss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setnp %al
> +; CHECK-NEXT:    sete %cl
> +; CHECK-NEXT:    andb %al, %cl
> +; CHECK-NEXT:    movzbl %cl, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1)
>    ret i32 %2
> @@ -1830,8 +3384,17 @@ define i32 @stack_fold_ucomiss_int(<4 x
>  declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_unpckhpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpckhpd
> -  ;CHECK:       vunpckhpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpckhpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[1],mem[1]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
>    ; fadd forces execution domain
> @@ -1840,8 +3403,17 @@ define <2 x double> @stack_fold_unpckhpd
>  }
>
>  define <4 x double> @stack_fold_unpckhpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpckhpd_ymm
> -  ;CHECK:       vunpckhpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpckhpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vunpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
>    ; fadd forces execution domain
> @@ -1850,8 +3422,17 @@ define <4 x double> @stack_fold_unpckhpd
>  }
>
>  define <4 x float> @stack_fold_unpckhps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpckhps
> -  ;CHECK:       vunpckhps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpckhps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
>    ; fadd forces execution domain
> @@ -1860,8 +3441,17 @@ define <4 x float> @stack_fold_unpckhps(
>  }
>
>  define <8 x float> @stack_fold_unpckhps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpckhps_ymm
> -  ;CHECK:       vunpckhps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpckhps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vunpckhps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
>    ; fadd forces execution domain
> @@ -1870,8 +3460,17 @@ define <8 x float> @stack_fold_unpckhps_
>  }
>
>  define <2 x double> @stack_fold_unpcklpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpcklpd
> -  ;CHECK:       vunpcklpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpcklpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
>    ; fadd forces execution domain
> @@ -1880,8 +3479,17 @@ define <2 x double> @stack_fold_unpcklpd
>  }
>
>  define <4 x double> @stack_fold_unpcklpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpcklpd_ymm
> -  ;CHECK:       vunpcklpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpcklpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
>    ; fadd forces execution domain
> @@ -1890,8 +3498,17 @@ define <4 x double> @stack_fold_unpcklpd
>  }
>
>  define <4 x float> @stack_fold_unpcklps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpcklps
> -  ;CHECK:       vunpcklps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpcklps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
>    ; fadd forces execution domain
> @@ -1900,8 +3517,17 @@ define <4 x float> @stack_fold_unpcklps(
>  }
>
>  define <8 x float> @stack_fold_unpcklps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpcklps_ymm
> -  ;CHECK:       vunpcklps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpcklps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vunpcklps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
>    ; fadd forces execution domain
> @@ -1910,8 +3536,16 @@ define <8 x float> @stack_fold_unpcklps_
>  }
>
>  define <2 x double> @stack_fold_xorpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_xorpd
> -  ;CHECK:       vxorpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -1923,8 +3557,16 @@ define <2 x double> @stack_fold_xorpd(<2
>  }
>
>  define <4 x double> @stack_fold_xorpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_xorpd_ymm
> -  ;CHECK:       vxorpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x double> %a0 to <4 x i64>
>    %3 = bitcast <4 x double> %a1 to <4 x i64>
> @@ -1936,8 +3578,16 @@ define <4 x double> @stack_fold_xorpd_ym
>  }
>
>  define <4 x float> @stack_fold_xorps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_xorps
> -  ;CHECK:       vxorps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <2 x i64>
>    %3 = bitcast <4 x float> %a1 to <2 x i64>
> @@ -1949,8 +3599,16 @@ define <4 x float> @stack_fold_xorps(<4
>  }
>
>  define <8 x float> @stack_fold_xorps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_xorps_ymm
> -  ;CHECK:       vxorps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <8 x float> %a0 to <4 x i64>
>    %3 = bitcast <8 x float> %a1 to <4 x i64>
>
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -9,16 +10,31 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
>  define <8 x double> @stack_fold_addpd_zmm(<8 x double> %a0, <8 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addpd_zmm
> -  ;CHECK:       vaddpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <8 x double> %a0, %a1
>    ret <8 x double> %2
>  }
>
>  define <8 x double> @stack_fold_addpd_zmm_k(<8 x double> %a0, <8 x double> %a1, i8 %mask, <8 x double>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_addpd_zmm_k:
> -  ;CHECK:       vaddpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addpd_zmm_k:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovapd (%rsi), %zmm2
> +; CHECK-NEXT:    vaddpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <8 x double> %a0, %a1
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -28,8 +44,17 @@ define <8 x double> @stack_fold_addpd_zm
>  }
>
>  define <8 x double> @stack_fold_addpd_zmm_k_commuted(<8 x double> %a0, <8 x double> %a1, i8 %mask, <8 x double>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_addpd_zmm_k_commuted:
> -  ;CHECK:       vaddpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addpd_zmm_k_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovapd (%rsi), %zmm2
> +; CHECK-NEXT:    vaddpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <8 x double> %a1, %a0
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -39,8 +64,15 @@ define <8 x double> @stack_fold_addpd_zm
>  }
>
>  define <8 x double> @stack_fold_addpd_zmm_kz(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_addpd_zmm_kz
> -  ;CHECK:       vaddpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addpd_zmm_kz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vaddpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <8 x double> %a1, %a0
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -49,16 +81,31 @@ define <8 x double> @stack_fold_addpd_zm
>  }
>
>  define <16 x float> @stack_fold_addps_zmm(<16 x float> %a0, <16 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addps_zmm
> -  ;CHECK:       vaddps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <16 x float> %a0, %a1
>    ret <16 x float> %2
>  }
>
>  define <16 x float> @stack_fold_addps_zmm_k(<16 x float> %a0, <16 x float> %a1, i16 %mask, <16 x float>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_addps_zmm_k:
> -  ;CHECK:       vaddps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addps_zmm_k:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovaps (%rsi), %zmm2
> +; CHECK-NEXT:    vaddps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <16 x float> %a0, %a1
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -68,8 +115,17 @@ define <16 x float> @stack_fold_addps_zm
>  }
>
>  define <16 x float> @stack_fold_addps_zmm_k_commuted(<16 x float> %a0, <16 x float> %a1, i16 %mask, <16 x float>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_addps_zmm_k_commuted:
> -  ;CHECK:       vaddps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addps_zmm_k_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovaps (%rsi), %zmm2
> +; CHECK-NEXT:    vaddps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <16 x float> %a1, %a0
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -79,8 +135,15 @@ define <16 x float> @stack_fold_addps_zm
>  }
>
>  define <16 x float> @stack_fold_addps_zmm_kz(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_addps_zmm_kz
> -  ;CHECK:       vaddps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addps_zmm_kz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vaddps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <16 x float> %a1, %a0
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -89,16 +152,28 @@ define <16 x float> @stack_fold_addps_zm
>  }
>
>  define double @stack_fold_addsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_addsd
> -  ;CHECK:       vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_addsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addsd_int
> -  ;CHECK:       vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -108,16 +183,28 @@ define <2 x double> @stack_fold_addsd_in
>  }
>
>  define float @stack_fold_addss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_addss
> -  ;CHECK:       vaddss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_addss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addss_int
> -  ;CHECK:       vaddss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -127,8 +214,16 @@ define <4 x float> @stack_fold_addss_int
>  }
>
>  define <8 x double> @stack_fold_andnpd_zmm(<8 x double> %a0, <8 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnpd_zmm
> -  ;CHECK:       vandnpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <8 x double> %a0 to <8 x i64>
>    %3 = bitcast <8 x double> %a1 to <8 x i64>
> @@ -141,8 +236,16 @@ define <8 x double> @stack_fold_andnpd_z
>  }
>
>  define <16 x float> @stack_fold_andnps_zmm(<16 x float> %a0, <16 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnps_zmm
> -  ;CHECK:       vandnps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <16 x float> %a0 to <16 x i32>
>    %3 = bitcast <16 x float> %a1 to <16 x i32>
> @@ -155,8 +258,16 @@ define <16 x float> @stack_fold_andnps_z
>  }
>
>  define <8 x double> @stack_fold_andpd_zmm(<8 x double> %a0, <8 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andpd_zmm
> -  ;CHECK:       vandpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <8 x double> %a0 to <8 x i64>
>    %3 = bitcast <8 x double> %a1 to <8 x i64>
> @@ -168,8 +279,16 @@ define <8 x double> @stack_fold_andpd_zm
>  }
>
>  define <16 x float> @stack_fold_andps_zmm(<16 x float> %a0, <16 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andps_zmm
> -  ;CHECK:       vandps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <16 x float> %a0 to <16 x i32>
>    %3 = bitcast <16 x float> %a1 to <16 x i32>
> @@ -181,8 +300,17 @@ define <16 x float> @stack_fold_andps_zm
>  }
>
>  define i8 @stack_fold_cmppd(<8 x double> %a0, <8 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmppd
> -  ;CHECK:       vcmpeqpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%k[0-9]}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmppd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k0 # 64-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k0, %eax
> +; CHECK-NEXT:    # kill: def $al killed $al killed $eax
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %res = call <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double> %a0, <8 x double> %a1, i32 0, i32 4)
>    %2 = bitcast <8 x i1> %res to i8
> @@ -191,8 +319,26 @@ define i8 @stack_fold_cmppd(<8 x double>
>  declare <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double>, <8 x double>, i32, i32)
>
>  define <8 x double> @stack_fold_cmppd_mask(<8 x double> %a0, <8 x double> %a1, <8 x double>* %a2, i8 %mask, <8 x double> %b0, <8 x double> %b1) {
> -  ;CHECK-LABEL: stack_fold_cmppd_mask:
> -  ;CHECK:       vcmpeqpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%k[0-7]}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmppd_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    subq $184, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 192
> +; CHECK-NEXT:    vmovups %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm2, (%rsp) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vaddpd (%rdi), %zmm0, %zmm0
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vcmpeqpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k1 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vblendmpd (%rsp), %zmm0, %zmm0 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    addq $184, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ; load and fadd are here to keep the operations below the side-effecting block and to avoid folding the wrong load
>    %2 = load <8 x double>, <8 x double>* %a2
> @@ -205,8 +351,26 @@ define <8 x double> @stack_fold_cmppd_ma
>  }
>
>  define <8 x double> @stack_fold_cmppd_mask_commuted(<8 x double> %a0, <8 x double> %a1, <8 x double>* %a2, i8 %mask, <8 x double> %b0, <8 x double> %b1) {
> -  ;CHECK-LABEL: stack_fold_cmppd_mask_commuted:
> -  ;CHECK:       vcmpeqpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%k[0-7]}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmppd_mask_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    subq $184, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 192
> +; CHECK-NEXT:    vmovups %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm2, (%rsp) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vaddpd (%rdi), %zmm0, %zmm0
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vcmpeqpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k1 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vblendmpd (%rsp), %zmm0, %zmm0 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    addq $184, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ; load and fadd are here to keep the operations below the side-effecting block and to avoid folding the wrong load
>    %2 = load <8 x double>, <8 x double>* %a2
> @@ -219,8 +383,17 @@ define <8 x double> @stack_fold_cmppd_ma
>  }
>
>  define i16 @stack_fold_cmpps(<16 x float> %a0, <16 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpps
> -  ;CHECK:       vcmpeqps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%k[0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k0 # 64-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k0, %eax
> +; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %res = call <16 x i1> @llvm.x86.avx512.cmp.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0, i32 4)
>    %2 = bitcast <16 x i1> %res to i16
> @@ -229,8 +402,26 @@ define i16 @stack_fold_cmpps(<16 x float
>  declare <16 x i1> @llvm.x86.avx512.cmp.ps.512(<16 x float>, <16 x float>, i32, i32)
>
>  define <16 x float> @stack_fold_cmpps_mask(<16 x float> %a0, <16 x float> %a1, <16 x float>* %a2, i16 %mask, <16 x float> %b0, <16 x float> %b1) {
> -  ;CHECK-LABEL: stack_fold_cmpps_mask:
> -  ;CHECK:       vcmpeqps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%k[0-7]}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpps_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    subq $184, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 192
> +; CHECK-NEXT:    vmovups %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm2, (%rsp) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vaddps (%rdi), %zmm0, %zmm0
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vcmpeqps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k1 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vblendmps (%rsp), %zmm0, %zmm0 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    addq $184, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ; load and fadd are here to keep the operations below the side-effecting block and to avoid folding the wrong load
>    %2 = load <16 x float>, <16 x float>* %a2
> @@ -243,8 +434,26 @@ define <16 x float> @stack_fold_cmpps_ma
>  }
>
>  define <16 x float> @stack_fold_cmpps_mask_commuted(<16 x float> %a0, <16 x float> %a1, <16 x float>* %a2, i16 %mask, <16 x float> %b0, <16 x float> %b1) {
> -  ;CHECK-LABEL: stack_fold_cmpps_mask_commuted:
> -  ;CHECK:       vcmpeqps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%k[0-7]}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpps_mask_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    subq $184, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 192
> +; CHECK-NEXT:    vmovups %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm2, (%rsp) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vaddps (%rdi), %zmm0, %zmm0
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vcmpeqps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k1 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vblendmps (%rsp), %zmm0, %zmm0 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    addq $184, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ; load and fadd are here to keep the operations below the side-effecting block and to avoid folding the wrong load
>    %2 = load <16 x float>, <16 x float>* %a2
> @@ -257,8 +466,14 @@ define <16 x float> @stack_fold_cmpps_ma
>  }
>
>  define <2 x double> @stack_fold_divsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_divsd_int
> -  ;CHECK:       vdivsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -268,16 +483,28 @@ define <2 x double> @stack_fold_divsd_in
>  }
>
>  define float @stack_fold_divss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_divss
> -  ;CHECK:       vdivss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fdiv float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_divss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_divss_int
> -  ;CHECK:       vdivss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -287,32 +514,56 @@ define <4 x float> @stack_fold_divss_int
>  }
>
>  define <8 x double> @stack_fold_cvtdq2pd(<8 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2pd
> -  ;CHECK:   vcvtdq2pd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtdq2pd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = sitofp <8 x i32> %a0 to <8 x double>
>    ret <8 x double> %2
>  }
>
>  define <8 x double> @stack_fold_cvtudq2pd(<8 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtudq2pd
> -  ;CHECK:   vcvtudq2pd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtudq2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtudq2pd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = uitofp <8 x i32> %a0 to <8 x double>
>    ret <8 x double> %2
>  }
>
>  define <8 x float> @stack_fold_cvtpd2ps(<8 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtpd2ps
> -  ;CHECK:   vcvtpd2ps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtpd2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtpd2ps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fptrunc <8 x double> %a0 to <8 x float>
>    ret <8 x float> %2
>  }
>
>  define <16 x float> @stack_fold_cvtph2ps(<16 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtph2ps
> -  ;CHECK:   vcvtph2ps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtph2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> undef, i16 -1, i32 4)
>    ret <16 x float> %2
> @@ -320,8 +571,14 @@ define <16 x float> @stack_fold_cvtph2ps
>  declare <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16>, <16 x float>, i16, i32) nounwind readonly
>
>  define <16 x i16> @stack_fold_cvtps2ph(<16 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2ph
> -  ;CHECK:   vcvtps2ph $0, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 32-byte Folded Spill
> +; CHECK-LABEL: stack_fold_cvtps2ph:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vcvtps2ph $0, %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
> +; CHECK-NEXT:    retq
>    %1 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 0, <16 x i16> undef, i16 -1)
>    %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ret <16 x i16> %1
> @@ -329,9 +586,15 @@ define <16 x i16> @stack_fold_cvtps2ph(<
>  declare <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float>, i32, <16 x i16>, i16) nounwind readonly
>
>  define <4 x float> @stack_fold_insertps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_insertps
> -  ;CHECK:       vinsertps $17, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> -  ;CHECK-NEXT:                                                                              {{.*#+}} xmm0 = zero,mem[0],xmm0[2,3]
> +; CHECK-LABEL: stack_fold_insertps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vinsertps $17, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = zero,mem[0],xmm0[2,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 209)
>    ret <4 x float> %2
> @@ -339,8 +602,14 @@ define <4 x float> @stack_fold_insertps(
>  declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone
>
>  define <8 x double> @stack_fold_maxpd_zmm(<8 x double> %a0, <8 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxpd_zmm
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
>    ret <8 x double> %2
> @@ -348,16 +617,31 @@ define <8 x double> @stack_fold_maxpd_zm
>  declare <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double>, <8 x double>, i32) nounwind readnone
>
>  define <8 x double> @stack_fold_maxpd_zmm_commutable(<8 x double> %a0, <8 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxpd_zmm_commutable
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_zmm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
>    ret <8 x double> %2
>  }
>
>  define <8 x double> @stack_fold_maxpd_zmm_commutable_k(<8 x double> %a0, <8 x double> %a1, i8 %mask, <8 x double>* %passthru) #1 {
> -  ;CHECK-LABEL: stack_fold_maxpd_zmm_commutable_k:
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_zmm_commutable_k:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovapd (%rsi), %zmm2
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -367,8 +651,17 @@ define <8 x double> @stack_fold_maxpd_zm
>  }
>
>  define <8 x double> @stack_fold_maxpd_zmm_commutable_k_commuted(<8 x double> %a0, <8 x double> %a1, i8 %mask, <8 x double>* %passthru) #1 {
> -  ;CHECK-LABEL: stack_fold_maxpd_zmm_commutable_k_commuted:
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_zmm_commutable_k_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovapd (%rsi), %zmm2
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double> %a1, <8 x double> %a0, i32 4)
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -378,8 +671,15 @@ define <8 x double> @stack_fold_maxpd_zm
>  }
>
>  define <8 x double> @stack_fold_maxpd_zmm_commutable_kz(<8 x double> %a0, <8 x double> %a1, i8 %mask) #1 {
> -  ;CHECK-LABEL: stack_fold_maxpd_zmm_commutable_kz
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_zmm_commutable_kz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double> %a1, <8 x double> %a0, i32 4)
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -388,8 +688,14 @@ define <8 x double> @stack_fold_maxpd_zm
>  }
>
>  define <16 x float> @stack_fold_maxps_zmm(<16 x float> %a0, <16 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxps_zmm
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
>    ret <16 x float> %2
> @@ -397,16 +703,31 @@ define <16 x float> @stack_fold_maxps_zm
>  declare <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float>, <16 x float>, i32) nounwind readnone
>
>  define <16 x float> @stack_fold_maxps_zmm_commutable(<16 x float> %a0, <16 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxps_zmm_commutable
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_zmm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
>    ret <16 x float> %2
>  }
>
>  define <16 x float> @stack_fold_maxps_zmm_commutable_k(<16 x float> %a0, <16 x float> %a1, i16 %mask, <16 x float>* %passthru) #1 {
> -  ;CHECK-LABEL: stack_fold_maxps_zmm_commutable_k:
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_zmm_commutable_k:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovaps (%rsi), %zmm2
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -416,8 +737,17 @@ define <16 x float> @stack_fold_maxps_zm
>  }
>
>  define <16 x float> @stack_fold_maxps_zmm_commutable_k_commuted(<16 x float> %a0, <16 x float> %a1, i16 %mask, <16 x float>* %passthru) #1 {
> -  ;CHECK-LABEL: stack_fold_maxps_zmm_commutable_k_commuted:
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_zmm_commutable_k_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovaps (%rsi), %zmm2
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a1, <16 x float> %a0, i32 4)
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -427,8 +757,15 @@ define <16 x float> @stack_fold_maxps_zm
>  }
>
>  define <16 x float> @stack_fold_maxps_zmm_commutable_kz(<16 x float> %a0, <16 x float> %a1, i16 %mask) #1 {
> -  ;CHECK-LABEL: stack_fold_maxps_zmm_commutable_kz
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_zmm_commutable_kz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a1, <16 x float> %a0, i32 4)
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -437,8 +774,14 @@ define <16 x float> @stack_fold_maxps_zm
>  }
>
>  define <8 x double> @stack_fold_minpd_zmm(<8 x double> %a0, <8 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minpd_zmm
> -  ;CHECK:       vminpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
>    ret <8 x double> %2
> @@ -446,16 +789,31 @@ define <8 x double> @stack_fold_minpd_zm
>  declare <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double>, <8 x double>, i32) nounwind readnone
>
>  define <8 x double> @stack_fold_minpd_zmm_commutable(<8 x double> %a0, <8 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minpd_zmm_commutable
> -  ;CHECK:       vminpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd_zmm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
>    ret <8 x double> %2
>  }
>
>  define <8 x double> @stack_fold_minpd_zmm_commutable_k(<8 x double> %a0, <8 x double> %a1, i8 %mask, <8 x double>* %passthru) #1 {
> -  ;CHECK-LABEL: stack_fold_minpd_zmm_commutable_k:
> -  ;CHECK:       vminpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd_zmm_commutable_k:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovapd (%rsi), %zmm2
> +; CHECK-NEXT:    vminpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -465,8 +823,17 @@ define <8 x double> @stack_fold_minpd_zm
>  }
>
>  define <8 x double> @stack_fold_minpd_zmm_commutable_k_commuted(<8 x double> %a0, <8 x double> %a1, i8 %mask, <8 x double>* %passthru) #1 {
> -  ;CHECK-LABEL: stack_fold_minpd_zmm_commutable_k_commuted:
> -  ;CHECK:       vminpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd_zmm_commutable_k_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovapd (%rsi), %zmm2
> +; CHECK-NEXT:    vminpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double> %a1, <8 x double> %a0, i32 4)
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -476,8 +843,15 @@ define <8 x double> @stack_fold_minpd_zm
>  }
>
>  define <8 x double> @stack_fold_minpd_zmm_commutable_kz(<8 x double> %a0, <8 x double> %a1, i8 %mask) #1 {
> -  ;CHECK-LABEL: stack_fold_minpd_zmm_commutable_kz
> -  ;CHECK:       vminpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd_zmm_commutable_kz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vminpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double> %a1, <8 x double> %a0, i32 4)
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -486,8 +860,14 @@ define <8 x double> @stack_fold_minpd_zm
>  }
>
>  define <16 x float> @stack_fold_minps_zmm(<16 x float> %a0, <16 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minps_zmm
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
>    ret <16 x float> %2
> @@ -495,16 +875,31 @@ define <16 x float> @stack_fold_minps_zm
>  declare <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float>, <16 x float>, i32) nounwind readnone
>
>  define <16 x float> @stack_fold_minps_zmm_commutable(<16 x float> %a0, <16 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minps_zmm_commutable
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_zmm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
>    ret <16 x float> %2
>  }
>
>  define <16 x float> @stack_fold_minps_zmm_commutable_k(<16 x float> %a0, <16 x float> %a1, i16 %mask, <16 x float>* %passthru) #1 {
> -  ;CHECK-LABEL: stack_fold_minps_zmm_commutable_k:
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_zmm_commutable_k:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovaps (%rsi), %zmm2
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -514,8 +909,17 @@ define <16 x float> @stack_fold_minps_zm
>  }
>
>  define <16 x float> @stack_fold_minps_zmm_commutable_k_commuted(<16 x float> %a0, <16 x float> %a1, i16 %mask, <16 x float>* %passthru) #1 {
> -  ;CHECK-LABEL: stack_fold_minps_zmm_commutable_k_commuted:
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_zmm_commutable_k_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovaps (%rsi), %zmm2
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a1, <16 x float> %a0, i32 4)
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -525,8 +929,15 @@ define <16 x float> @stack_fold_minps_zm
>  }
>
>  define <16 x float> @stack_fold_minps_zmm_commutable_kz(<16 x float> %a0, <16 x float> %a1, i16 %mask) #1 {
> -  ;CHECK-LABEL: stack_fold_minps_zmm_commutable_kz
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_zmm_commutable_kz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a1, <16 x float> %a0, i32 4)
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -535,16 +946,31 @@ define <16 x float> @stack_fold_minps_zm
>  }
>
>  define <8 x double> @stack_fold_mulpd_zmm(<8 x double> %a0, <8 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulpd_zmm
> -  ;CHECK:       vmulpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <8 x double> %a0, %a1
>    ret <8 x double> %2
>  }
>
>  define <8 x double> @stack_fold_mulpd_zmm_k(<8 x double> %a0, <8 x double> %a1, i8 %mask, <8 x double>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_mulpd_zmm_k:
> -  ;CHECK:       vmulpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulpd_zmm_k:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovapd (%rsi), %zmm2
> +; CHECK-NEXT:    vmulpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <8 x double> %a0, %a1
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -554,8 +980,17 @@ define <8 x double> @stack_fold_mulpd_zm
>  }
>
>  define <8 x double> @stack_fold_mulpd_zmm_k_commuted(<8 x double> %a0, <8 x double> %a1, i8 %mask, <8 x double>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_mulpd_zmm_k_commuted:
> -  ;CHECK:       vmulpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulpd_zmm_k_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovapd (%rsi), %zmm2
> +; CHECK-NEXT:    vmulpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <8 x double> %a1, %a0
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -565,8 +1000,15 @@ define <8 x double> @stack_fold_mulpd_zm
>  }
>
>  define <8 x double> @stack_fold_mulpd_zmm_kz(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_mulpd_zmm_kz
> -  ;CHECK:       vmulpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulpd_zmm_kz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmulpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <8 x double> %a1, %a0
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -575,16 +1017,31 @@ define <8 x double> @stack_fold_mulpd_zm
>  }
>
>  define <16 x float> @stack_fold_mulps_zmm(<16 x float> %a0, <16 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulps_zmm
> -  ;CHECK:       vmulps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <16 x float> %a0, %a1
>    ret <16 x float> %2
>  }
>
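One detail worth calling out in the regenerated lines: the old hand-written
pattern {{-?[0-9]*}}(%rsp) only matched rsp-relative slots, while
update_llc_test_checks.py emits {{[-0-9]+}}(%r{{[sb]}}p), which matches either
frame register. A single directive such as

  ; CHECK-NEXT: vmulps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload

accepts both of these hypothetical concrete outputs:

  vmulps -64(%rsp), %zmm0, %zmm0 # 64-byte Folded Reload
  vmulps -64(%rbp), %zmm0, %zmm0 # 64-byte Folded Reload

so the tests keep passing if codegen changes ever give one of these functions
a frame pointer.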
>  define <16 x float> @stack_fold_mulps_zmm_k(<16 x float> %a0, <16 x float> %a1, i16 %mask, <16 x float>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_mulps_zmm_k:
> -  ;CHECK:       vmulps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulps_zmm_k:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovaps (%rsi), %zmm2
> +; CHECK-NEXT:    vmulps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <16 x float> %a0, %a1
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -594,8 +1051,17 @@ define <16 x float> @stack_fold_mulps_zm
>  }
>
>  define <16 x float> @stack_fold_mulps_zmm_k_commuted(<16 x float> %a0, <16 x float> %a1, i16 %mask, <16 x float>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_mulps_zmm_k_commuted:
> -  ;CHECK:       vmulps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulps_zmm_k_commuted:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovaps (%rsi), %zmm2
> +; CHECK-NEXT:    vmulps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <16 x float> %a1, %a0
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -605,8 +1071,15 @@ define <16 x float> @stack_fold_mulps_zm
>  }
>
>  define <16 x float> @stack_fold_mulps_zmm_kz(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_mulps_zmm_kz
> -  ;CHECK:       vmulps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulps_zmm_kz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmulps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <16 x float> %a1, %a0
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -615,16 +1088,28 @@ define <16 x float> @stack_fold_mulps_zm
>  }
>
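The _k / _kz suffixes map onto the two AVX-512 masking modes, and the
difference is visible in the checks: merge masking loads the passthru into a
scratch zmm first, while zero masking folds straight into the destination. In
the IR, elided by the hunk headers above, the split is presumably just the
false operand of the select (value names abbreviated here):

  %m = bitcast i16 %mask to <16 x i1>
  ; _k (merge): false lanes come from the passthru load, printed as {%k1}
  %merge = select <16 x i1> %m, <16 x float> %mul, <16 x float> %pt
  ; _kz (zeroing): false lanes are zero, printed as {%k1} {z}
  %zero = select <16 x i1> %m, <16 x float> %mul, <16 x float> zeroinitializer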
>  define double @stack_fold_mulsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_mulsd
> -  ;CHECK:       vmulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_mulsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulsd_int
> -  ;CHECK:       vmulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -634,16 +1119,28 @@ define <2 x double> @stack_fold_mulsd_in
>  }
>
>  define float @stack_fold_mulss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_mulss
> -  ;CHECK:       vmulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_mulss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulss_int
> -  ;CHECK:       vmulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -653,8 +1150,16 @@ define <4 x float> @stack_fold_mulss_int
>  }
>
>  define <8 x double> @stack_fold_orpd_zmm(<8 x double> %a0, <8 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_orpd_zmm
> -  ;CHECK:       vorpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <8 x double> %a0 to <8 x i64>
>    %3 = bitcast <8 x double> %a1 to <8 x i64>
> @@ -666,8 +1171,16 @@ define <8 x double> @stack_fold_orpd_zmm
>  }
>
>  define <16 x float> @stack_fold_orps_zmm(<16 x float> %a0, <16 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_orps_zmm
> -  ;CHECK:       vorps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <16 x float> %a0 to <16 x i32>
>    %3 = bitcast <16 x float> %a1 to <16 x i32>
> @@ -679,16 +1192,45 @@ define <16 x float> @stack_fold_orps_zmm
>  }
>
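The trailing vxorpd/vaddpd (or vxorps/vaddps) pair in the or/xor tests is not
part of the operation under test: the IR appends an add of zero, which the
file later annotates as "fadd forces execution domain", so that the bitwise
op cannot be rewritten into its integer-domain twin (vpord/vpxord). Roughly:

  %i = or <8 x i64> %a, %b                    ; becomes the folded vorpd
  %f = bitcast <8 x i64> %i to <8 x double>
  %r = fadd <8 x double> %f, zeroinitializer  ; pins the FP domain
  ret <8 x double> %r

The autogenerated checks now pin those two extra instructions as well, which
is exactly the kind of detail the old one-line regexes ignored.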
>  define <8 x double> @stack_fold_shuff64x2(<8 x double> %a, <8 x double> %b) {
> -  ;CHECK-LABEL: stack_fold_shuff64x2
> -  ;CHECK:   vshuff64x2 $24, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shuff64x2:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    subq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 64
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vshuff64x2 $24, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 = zmm0[0,1,4,5],mem[2,3,0,1]
> +; CHECK-NEXT:    addq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 10, i32 11, i32 8, i32 9>
>    ret <8 x double> %2
>  }
>
>  define <8 x double> @stack_fold_shuff64x2_mask(<8 x double> %a, <8 x double> %b, i8 %mask, <8 x double>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_shuff64x2_mask
> -  ;CHECK:   vshuff64x2 $24, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shuff64x2_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    subq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 64
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovapd (%rsi), %zmm1
> +; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vshuff64x2 $24, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm1 {%k1} = zmm0[0,1,4,5],mem[2,3,0,1]
> +; CHECK-NEXT:    vmovapd %zmm1, %zmm0
> +; CHECK-NEXT:    addq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 10, i32 11, i32 8, i32 9>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -699,8 +1241,22 @@ define <8 x double> @stack_fold_shuff64x
>  }
>
>  define <8 x double> @stack_fold_shuff64x2_maskz(<8 x double> %a, <8 x double> %b, i8 %mask, <8 x double>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_shuff64x2_maskz
> -  ;CHECK:   vshuff64x2 $24, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shuff64x2_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    subq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 64
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vshuff64x2 $24, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 {%k1} {z} = zmm0[0,1,4,5],mem[2,3,0,1]
> +; CHECK-NEXT:    addq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 10, i32 11, i32 8, i32 9>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -709,8 +1265,24 @@ define <8 x double> @stack_fold_shuff64x
>  }
>
>  define <16 x float> @stack_fold_shuff32x4_mask(<16 x float> %a, <16 x float> %b, i16 %mask, <16 x float>* %passthru) {
> -  ;CHECK-LABEL: stack_fold_shuff32x4_mask
> -  ;CHECK:   vshuff32x4 $20, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shuff32x4_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    subq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 64
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovaps (%rsi), %zmm1
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vshuff32x4 $20, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm1 {%k1} = zmm0[0,1,2,3,4,5,6,7],mem[4,5,6,7,0,1,2,3]
> +; CHECK-NEXT:    vmovaps %zmm1, %zmm0
> +; CHECK-NEXT:    addq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -721,8 +1293,22 @@ define <16 x float> @stack_fold_shuff32x
>  }
>
>  define <16 x float> @stack_fold_shuff32x4_maskz(<16 x float> %a, <16 x float> %b, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_shuff32x4_maskz
> -  ;CHECK:   vshuff32x4 $20, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shuff32x4_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    subq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 64
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
> +; CHECK-NEXT:    vshuff32x4 $20, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,7],mem[4,5,6,7,0,1,2,3]
> +; CHECK-NEXT:    addq $56, %rsp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 16, i32 17, i32 18, i32 19>
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -731,32 +1317,56 @@ define <16 x float> @stack_fold_shuff32x
>  }
>
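The shuff64x2/shuff32x4 tests are the first ones here that grow a real frame
(subq $56, %rsp plus .cfi bookkeeping), presumably because their asm also
clobbers xmm1, so both 64-byte inputs get spilled rather than one. The
regenerated checks lock down the frame setup and CFI directives, and they
also pick up the shuffle-decode comments, which make the immediate auditable:

  ; CHECK-NEXT: vshuff64x2 $24, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
  ; CHECK-NEXT: # zmm0 = zmm0[0,1,4,5],mem[2,3,0,1]

Here 24 = 0b00011000 splits into the 2-bit lane selectors 0,2,1,0: two
128-bit lanes from the register operand, then two from memory, matching the
printed [0,1,4,5] / [2,3,0,1] decode.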
>  define <8 x double> @stack_fold_subpd_zmm(<8 x double> %a0, <8 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_subpd_zmm
> -  ;CHECK:       vsubpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fsub <8 x double> %a0, %a1
>    ret <8 x double> %2
>  }
>
>  define <16 x float> @stack_fold_subps_zmm(<16 x float> %a0, <16 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_subps_zmm
> -  ;CHECK:       vsubps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fsub <16 x float> %a0, %a1
>    ret <16 x float> %2
>  }
>
>  define double @stack_fold_subsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_subsd
> -  ;CHECK:       vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fsub double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_subsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_subsd_int
> -  ;CHECK:       vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -766,16 +1376,28 @@ define <2 x double> @stack_fold_subsd_in
>  }
>
>  define float @stack_fold_subss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_subss
> -  ;CHECK:       vsubss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fsub float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_subss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_subss_int
> -  ;CHECK:       vsubss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -785,8 +1407,16 @@ define <4 x float> @stack_fold_subss_int
>  }
>
>  define <8 x double> @stack_fold_xorpd_zmm(<8 x double> %a0, <8 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_xorpd_zmm
> -  ;CHECK:       vxorpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <8 x double> %a0 to <8 x i64>
>    %3 = bitcast <8 x double> %a1 to <8 x i64>
> @@ -798,8 +1428,16 @@ define <8 x double> @stack_fold_xorpd_zm
>  }
>
>  define <16 x float> @stack_fold_xorps_zmm(<16 x float> %a0, <16 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_xorps_zmm
> -  ;CHECK:       vxorps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <16 x float> %a0 to <16 x i32>
>    %3 = bitcast <16 x float> %a1 to <16 x i32>
> @@ -811,9 +1449,44 @@ define <16 x float> @stack_fold_xorps_zm
>  }
>
>  define i32 @stack_fold_extractps(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_extractps
> -  ;CHECK:       vextractps $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
> -  ;CHECK:       movl    {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
> +; CHECK-LABEL: stack_fold_extractps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    vextractps $1, %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = extractelement <4 x float> %a0, i32 1
>    %2 = bitcast float %1 to i32
>    %3 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
> @@ -821,56 +1494,106 @@ define i32 @stack_fold_extractps(<4 x fl
>  }
>
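stack_fold_extractps looks alarming at first, six pushes and matching
.cfi_offset lines for a single vextractps, but it follows from the harness:
this test's asm clobbers the GPRs (rax through r15) instead of the vector
registers, so every callee-saved GPR must be preserved. The lines under test
are still just the 4-byte spill/reload pair around the asm; in IR terms:

  %v = extractelement <4 x float> %a0, i32 1
  %i = bitcast float %v to i32
  ; %i has to survive a "nop" that clobbers rax..r15, hence
  ;   vextractps $1, %xmm0, <slot>  # 4-byte Folded Spill
  ;   movl <slot>, %eax             # 4-byte Reload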
>  define <4 x float> @stack_fold_extracti32x4(<16 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_extracti32x4
> -  ;CHECK:       vextractf32x4 $3, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
> +; CHECK-LABEL: stack_fold_extracti32x4:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vextractf32x4 $3, %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = shufflevector <16 x float> %a0, <16 x float> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
>    %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ret <4 x float> %1
>  }
>
>  define <2 x double> @stack_fold_extractf64x2(<8 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_extractf64x2
> -  ;CHECK:       vextractf32x4 $3, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
> +; CHECK-LABEL: stack_fold_extractf64x2:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vextractf32x4 $3, %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = shufflevector <8 x double> %a0, <8 x double> undef, <2 x i32> <i32 6, i32 7>
>    %2 = tail call <2 x double> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ret <2 x double> %1
>  }
>
>  define <8 x float> @stack_fold_extracti32x8(<16 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_extracti32x8
> -  ;CHECK:       vextractf64x4 $1, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 32-byte Folded Spill
> +; CHECK-LABEL: stack_fold_extracti32x8:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vextractf64x4 $1, %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
> +; CHECK-NEXT:    retq
>    %1 = shufflevector <16 x float> %a0, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
>    %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ret <8 x float> %1
>  }
>
>  define <4 x double> @stack_fold_extractf64x4(<8 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_extractf64x4
> -  ;CHECK:       vextractf64x4 $1, {{%zmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 32-byte Folded Spill
> +; CHECK-LABEL: stack_fold_extractf64x4:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vextractf64x4 $1, %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
> +; CHECK-NEXT:    retq
>    %1 = shufflevector <8 x double> %a0, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
>    %2 = tail call <2 x double> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ret <4 x double> %1
>  }
>
>  define <16 x float> @stack_fold_insertf32x8(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_insertf32x8
> -  ;CHECK:       vinsertf64x4 $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_insertf32x8:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
>    ret <16 x float> %2
>  }
>
>  define <8 x double> @stack_fold_insertf64x4(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_insertf64x4
> -  ;CHECK:       vinsertf64x4 $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_insertf64x4:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
>    ret <8 x double> %2
>  }
>
>  define <8 x double> @stack_fold_insertf64x4_mask(<8 x double>* %passthru, <4 x double> %a0, <4 x double> %a1, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_insertf64x4_mask
> -  ;CHECK:       vinsertf64x4 $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_insertf64x4_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vmovapd (%rdi), %zmm2
> +; CHECK-NEXT:    vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 32-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -880,8 +1603,16 @@ define <8 x double> @stack_fold_insertf6
>  }
>
>  define <8 x double> @stack_fold_insertf64x4_maskz(<4 x double> %a0, <4 x double> %a1, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_insertf64x4_maskz
> -  ;CHECK:       vinsertf64x4 $1, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_insertf64x4_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -890,8 +1621,14 @@ define <8 x double> @stack_fold_insertf6
>  }
>
>  define <16 x float> @stack_fold_vpermt2ps(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermt2ps
> -  ;CHECK:       vpermt2ps {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermt2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermt2ps {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2)
>    ret <16 x float> %2
> @@ -899,16 +1636,31 @@ define <16 x float> @stack_fold_vpermt2p
>  declare <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float>, <16 x i32>, <16 x float>)
>
>  define <16 x float> @stack_fold_vpermi2ps(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermi2ps
> -  ;CHECK:       vpermi2ps {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermi2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermi2ps {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x1, <16 x i32> %x0, <16 x float> %x2)
>    ret <16 x float> %2
>  }
>
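Worth noting how vpermt2ps and vpermi2ps fall out of a single intrinsic:
both tests call llvm.x86.avx512.vpermi2var.ps.512 and differ only in which
argument carries the indices. The backend emits vpermi2* when the index
register is the one overwritten by the result, and vpermt2* when a table
register is overwritten, so each test steers register assignment to get the
memory-folded form it wants. Both calls below are lifted from the tests
above; only the commentary is new:

  ; zmm0 holds table %x0 and is overwritten: vpermt2ps mem, %zmm1, %zmm0
  %t = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2)
  ; zmm0 holds index %x0 and is overwritten: vpermi2ps mem, %zmm1, %zmm0
  %i = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x1, <16 x i32> %x0, <16 x float> %x2)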
>  define <16 x float> @stack_fold_vpermi2ps_mask(<16 x float> %x0, <16 x i32>* %x1, <16 x float> %x2, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_vpermi2ps_mask
> -  ;CHECK:       vpermi2ps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermi2ps_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %zmm2
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vpermi2ps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %x1b = load <16 x i32>, <16 x i32>* %x1
>    %2 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> %x1b, <16 x float> %x2)
> @@ -919,8 +1671,16 @@ define <16 x float> @stack_fold_vpermi2p
>  }
>
>  define <16 x float> @stack_fold_vpermt2ps_mask(<16 x i32>* %x0, <16 x float> %x1, <16 x float> %x2, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_vpermt2ps_mask
> -  ;CHECK:       vpermt2ps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermt2ps_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %zmm1
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vpermt2ps {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %x0b = load <16 x i32>, <16 x i32>* %x0
>    %2 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x1, <16 x i32> %x0b, <16 x float> %x2)
> @@ -930,8 +1690,16 @@ define <16 x float> @stack_fold_vpermt2p
>  }
>
>  define <16 x float> @stack_fold_vpermt2ps_maskz(<16 x i32>* %x0, <16 x float> %x1, <16 x float> %x2, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_vpermt2ps_maskz
> -  ;CHECK:       vpermt2ps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermt2ps_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps (%rdi), %zmm1
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vpermt2ps {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %x0b = load <16 x i32>, <16 x i32>* %x0
>    %2 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x1, <16 x i32> %x0b, <16 x float> %x2)
> @@ -941,8 +1709,14 @@ define <16 x float> @stack_fold_vpermt2p
>  }
>
>  define <8 x double> @stack_fold_vpermt2pd(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermt2pd
> -  ;CHECK:       vpermt2pd {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermt2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermt2pd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2)
>    %3 = bitcast <8 x i64> %x1 to <8 x double>
> @@ -951,16 +1725,31 @@ define <8 x double> @stack_fold_vpermt2p
>  declare <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double>, <8 x i64>, <8 x double>)
>
>  define <8 x double> @stack_fold_vpermi2pd(<8 x i64> %x0, <8 x double> %x1, <8 x double> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermi2pd
> -  ;CHECK:       vpermi2pd {{-?[0-9]*}}(%rsp), %zmm1, %zmm0 # 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermi2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermi2pd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %x1, <8 x i64> %x0, <8 x double> %x2)
>    ret <8 x double> %2
>  }
>
>  define <8 x double> @stack_fold_permpd(<8 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_permpd
> -  ;CHECK:   vpermpd $235, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermpd $235, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 = mem[3,2,2,3,7,6,6,7]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 3, i32 2, i32 2, i32 3, i32 7, i32 6, i32 6, i32 7>
>    ; fadd forces execution domain
> @@ -969,8 +1758,19 @@ define <8 x double> @stack_fold_permpd(<
>  }
>
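The shuffle-decode comments also make the permute immediates auditable by
hand. For the vpermpd $235 in the checks above: 235 = 0b11101011, read as
four 2-bit selectors from the low bits up:

  0b 11 10 10 11  ->  3,2,2,3 within each 256-bit half
  zmm0 = mem[3,2,2,3,7,6,6,7]

which is exactly the comment the script captured, so a stale immediate now
fails as an obvious comment mismatch instead of slipping past a loose regex.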
>  define <8 x double> @stack_fold_permpd_mask(<8 x double>* %passthru, <8 x double> %a0, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_permpd_mask
> -  ;CHECK:   vpermpd $235, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permpd_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vmovapd (%rdi), %zmm0
> +; CHECK-NEXT:    vpermpd $235, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 {%k1} = mem[3,2,2,3,7,6,6,7]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 3, i32 2, i32 2, i32 3, i32 7, i32 6, i32 6, i32 7>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -983,8 +1783,16 @@ define <8 x double> @stack_fold_permpd_m
>  }
>
>  define <8 x double> @stack_fold_permpd_maskz(<8 x double> %a0, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_permpd_maskz
> -  ;CHECK:   vpermpd $235, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permpd_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vpermpd $235, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 {%k1} {z} = mem[3,2,2,3,7,6,6,7]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 3, i32 2, i32 2, i32 3, i32 7, i32 6, i32 6, i32 7>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -993,8 +1801,16 @@ define <8 x double> @stack_fold_permpd_m
>  }
>
>  define <8 x double> @stack_fold_permpdvar(<8 x i64> %a0, <8 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_permpdvar
> -  ;CHECK:   vpermpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permpdvar:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %a1, <8 x i64> %a0)
>    ; fadd forces execution domain
> @@ -1004,8 +1820,14 @@ define <8 x double> @stack_fold_permpdva
>  declare <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double>, <8 x i64>) nounwind readonly
>
>  define <16 x float> @stack_fold_permps(<16 x i32> %a0, <16 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_permps
> -  ;CHECK:       vpermps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float> %a1, <16 x i32> %a0)
>    ret <16 x float> %2
> @@ -1013,16 +1835,33 @@ define <16 x float> @stack_fold_permps(<
>  declare <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float>, <16 x i32>) nounwind readonly
>
>  define <8 x double> @stack_fold_permilpd_zmm(<8 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilpd_zmm
> -  ;CHECK:   vpermilpd $85, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpd_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 = mem[1,0,3,2,5,4,7,6]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
>    ret <8 x double> %2
>  }
>
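Same exercise for vpermilpd $85 above: 85 = 0b01010101, one selector bit per
double choosing between the two elements of its own 128-bit lane, so every
adjacent pair swaps:

  0b01010101  ->  1,0 in each lane
  zmm0 = mem[1,0,3,2,5,4,7,6]

matching the generated decode comment.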
>  define <8 x double> @stack_fold_permilpd_zmm_mask(<8 x double>* %passthru, <8 x double> %a0, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_permilpd_zmm_mask
> -  ;CHECK:   vpermilpd $85, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpd_zmm_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vmovapd (%rdi), %zmm1
> +; CHECK-NEXT:    vpermilpd $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm1 {%k1} = mem[1,0,3,2,5,4,7,6]
> +; CHECK-NEXT:    vmovapd %zmm1, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -1033,8 +1872,16 @@ define <8 x double> @stack_fold_permilpd
>  }
>
>  define <8 x double> @stack_fold_permilpd_zmm_maskz(<8 x double> %a0, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_permilpd_zmm_maskz
> -  ;CHECK:   vpermilpd $85, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpd_zmm_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vpermilpd $85, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 {%k1} {z} = mem[1,0,3,2,5,4,7,6]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -1043,8 +1890,14 @@ define <8 x double> @stack_fold_permilpd
>  }
>
>  define <8 x double> @stack_fold_permilpdvar_zmm(<8 x double> %a0, <8 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpdvar_zmm
> -  ;CHECK:       vpermilpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpdvar_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %a0, <8 x i64> %a1)
>    ret <8 x double> %2
> @@ -1052,8 +1905,17 @@ define <8 x double> @stack_fold_permilpd
>  declare <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double>, <8 x i64>) nounwind readnone
>
>  define <8 x double> @stack_fold_permilpdvar_zmm_mask(<8 x double>* %passthru, <8 x double> %a0, <8 x i64> %a1, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_permilpdvar_zmm_mask
> -  ;CHECK:       vpermilpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpdvar_zmm_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vmovapd (%rdi), %zmm2
> +; CHECK-NEXT:    vpermilpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovapd %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %a0, <8 x i64> %a1)
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -1064,8 +1926,15 @@ define <8 x double> @stack_fold_permilpd
>  }
>
>  define <8 x double> @stack_fold_permilpdvar_zmm_maskz(<8 x double> %a0, <8 x i64> %a1, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_permilpdvar_zmm_maskz
> -  ;CHECK:       vpermilpd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpdvar_zmm_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vpermilpd {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %a0, <8 x i64> %a1)
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -1074,16 +1943,33 @@ define <8 x double> @stack_fold_permilpd
>  }
>
>  define <16 x float> @stack_fold_permilps_zmm(<16 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilps_zmm
> -  ;CHECK:   vpermilps $27, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilps_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps $27, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
>    ret <16 x float> %2
>  }
>
>  define <16 x float> @stack_fold_permilps_zmm_mask(<16 x float>* %passthru, <16 x float> %a0, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_permilps_zmm_mask
> -  ;CHECK:   vpermilps $27, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilps_zmm_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vmovaps (%rdi), %zmm1
> +; CHECK-NEXT:    vpermilps $27, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm1 {%k1} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
> +; CHECK-NEXT:    vmovaps %zmm1, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -1094,8 +1980,16 @@ define <16 x float> @stack_fold_permilps
>  }
>
>  define <16 x float> @stack_fold_permilps_zmm_maskz(<16 x float> %a0, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_permilps_zmm_maskz
> -  ;CHECK:   vpermilps $27, {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilps_zmm_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vpermilps $27, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    # zmm0 {%k1} {z} = mem[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -1104,8 +1998,14 @@ define <16 x float> @stack_fold_permilps
>  }
>
>  define <16 x float> @stack_fold_permilpsvar_zmm(<16 x float> %a0, <16 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpsvar_zmm
> -  ;CHECK:       vpermilps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpsvar_zmm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %a0, <16 x i32> %a1)
>    ret <16 x float> %2
> @@ -1113,8 +2013,17 @@ define <16 x float> @stack_fold_permilps
>  declare <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float>, <16 x i32>) nounwind readnone
>
>  define <16 x float> @stack_fold_permilpsvar_zmm_mask(<16 x float>* %passthru, <16 x float> %a0, <16 x i32> %a1, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_permilpsvar_zmm_mask
> -  ;CHECK:       vpermilps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpsvar_zmm_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vmovaps (%rdi), %zmm2
> +; CHECK-NEXT:    vpermilps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 64-byte Folded Reload
> +; CHECK-NEXT:    vmovaps %zmm2, %zmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %a0, <16 x i32> %a1)
>    %3 = bitcast i16 %mask to <16 x i1>
> @@ -1125,8 +2034,15 @@ define <16 x float> @stack_fold_permilps
>  }
>
>  define <16 x float> @stack_fold_permilpsvar_zmm_maskz(<16 x float> %a0, <16 x i32> %a1, i16 %mask) {
> -  ;CHECK-LABEL: stack_fold_permilpsvar_zmm_maskz
> -  ;CHECK:       vpermilps {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpsvar_zmm_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vpermilps {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 {%k1} {z} # 64-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %a0, <16 x i32> %a1)
>    %3 = bitcast i16 %mask to <16 x i1>
>
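(For anyone wanting to reproduce these: the NOTE line each file now carries names utils/update_llc_test_checks.py. Regenerating locally should be roughly the following; the build directory is an assumption on my part, adjust to your tree:

  $ llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll

The script reads the RUN line from the test itself, so no extra llc flags are needed.)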
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-fp-avx512vl.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -9,40 +10,72 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
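(The header comment above is the key to every test in this patch: a sideeffecting "nop" asm clobbers xmm2 through xmm31, so with only xmm0/xmm1 left and the asm result itself needing an xmm register, the allocator must spill one argument across the asm, and the autogenerated CHECK lines then prove the reload is folded into the consuming instruction's memory operand instead of going through a register first. Stripped to a skeleton, as an illustration rather than a line from the file:

define <2 x double> @fold_sketch(<2 x double> %a0, <2 x double> %a1) {
  ; clobber everything except xmm0/xmm1, forcing %a1 into a stack slot
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; expected to lower to "vaddpd <slot>(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload"
  %2 = fadd <2 x double> %a0, %a1
  ret <2 x double> %2
}
)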
>  define <2 x double> @stack_fold_addpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addpd
> -  ;CHECK:       vaddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_addpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addpd_ymm
> -  ;CHECK:       vaddpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <4 x double> %a0, %a1
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_addps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addps
> -  ;CHECK:       vaddps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_addps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addps_ymm
> -  ;CHECK:       vaddps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaddps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fadd <8 x float> %a0, %a1
>    ret <8 x float> %2
>  }
>
>  define <2 x double> @stack_fold_andnpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnpd
> -  ;CHECK:       vandnpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -55,8 +88,16 @@ define <2 x double> @stack_fold_andnpd(<
>  }
>
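(The vxorpd/vaddpd pair trailing the logic-op tests is not an accident of regeneration: the IR tail these hunks elide adds +0.0 to the result, pinning the operation to the FP execution domain so that vandnpd is selected rather than its integer-domain twin. This is from memory of the file rather than from this diff, so take it as a sketch:

  %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
  %5 = and <2 x i64> %4, %3
  %6 = bitcast <2 x i64> %5 to <2 x double>
  ; fadd forces execution domain
  %7 = fadd <2 x double> %6, <double 0x0, double 0x0>
  ret <2 x double> %7
)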
>  define <4 x double> @stack_fold_andnpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnpd_ymm
> -  ;CHECK:       vandnpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <4 x double> %a0 to <4 x i64>
>    %3 = bitcast <4 x double> %a1 to <4 x i64>
> @@ -69,8 +110,16 @@ define <4 x double> @stack_fold_andnpd_y
>  }
>
>  define <4 x float> @stack_fold_andnps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnps
> -  ;CHECK:       vandnps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <2 x i64>
>    %3 = bitcast <4 x float> %a1 to <2 x i64>
> @@ -83,8 +132,16 @@ define <4 x float> @stack_fold_andnps(<4
>  }
>
>  define <8 x float> @stack_fold_andnps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnps_ymm
> -  ;CHECK:       vandnps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandnps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <8 x float> %a0 to <4 x i64>
>    %3 = bitcast <8 x float> %a1 to <4 x i64>
> @@ -97,8 +154,16 @@ define <8 x float> @stack_fold_andnps_ym
>  }
>
>  define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andpd
> -  ;CHECK:       vandpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -110,8 +175,16 @@ define <2 x double> @stack_fold_andpd(<2
>  }
>
>  define <4 x double> @stack_fold_andpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andpd_ymm
> -  ;CHECK:       vandpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <4 x double> %a0 to <4 x i64>
>    %3 = bitcast <4 x double> %a1 to <4 x i64>
> @@ -123,8 +196,16 @@ define <4 x double> @stack_fold_andpd_ym
>  }
>
>  define <4 x float> @stack_fold_andps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andps
> -  ;CHECK:       vandps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <4 x i32>
>    %3 = bitcast <4 x float> %a1 to <4 x i32>
> @@ -136,8 +217,16 @@ define <4 x float> @stack_fold_andps(<4
>  }
>
>  define <8 x float> @stack_fold_andps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andps_ymm
> -  ;CHECK:       vandps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vandps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <8 x float> %a0 to <8 x i32>
>    %3 = bitcast <8 x float> %a1 to <8 x i32>
> @@ -149,8 +238,16 @@ define <8 x float> @stack_fold_andps_ymm
>  }
>
>  define i8 @stack_fold_cmppd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmppd
> -  ;CHECK:       vcmpeqpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-9]}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmppd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %k0 # 16-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k0, %eax
> +; CHECK-NEXT:    # kill: def $al killed $al killed $eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %res = call <2 x i1> @llvm.x86.avx512.cmp.pd.128(<2 x double> %a0, <2 x double> %a1, i32 0)
>    %2 = shufflevector <2 x i1> %res, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
> @@ -160,8 +257,17 @@ define i8 @stack_fold_cmppd(<2 x double>
>  declare <2 x i1> @llvm.x86.avx512.cmp.pd.128(<2 x double>, <2 x double>, i32)
>
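(Same story for the cmp tests: the shufflevector pads the <2 x i1> compare result out to 8 lanes, and the elided final lines simply bitcast that to the returned i8, which is also why the output carries the "# kill: def $al ..." annotation, since only %al of %eax is meaningful. The missing tail, sketched:

  %3 = bitcast <8 x i1> %2 to i8
  ret i8 %3
)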
>  define i8 @stack_fold_cmppd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmppd_ymm
> -  ;CHECK:       vcmpeqpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%k[0-9]}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmppd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %k0 # 32-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k0, %eax
> +; CHECK-NEXT:    # kill: def $al killed $al killed $eax
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %res = call <4 x i1> @llvm.x86.avx512.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i32 0)
>    %2 = shufflevector <4 x i1> %res, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
> @@ -171,8 +277,16 @@ define i8 @stack_fold_cmppd_ymm(<4 x dou
>  declare <4 x i1> @llvm.x86.avx512.cmp.pd.256(<4 x double>, <4 x double>, i32)
>
>  define i8 @stack_fold_cmpps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpps
> -  ;CHECK:       vcmpeqps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %k0 # 16-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k0, %eax
> +; CHECK-NEXT:    # kill: def $al killed $al killed $eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %res = call <4 x i1> @llvm.x86.avx512.cmp.ps.128(<4 x float> %a0, <4 x float> %a1, i32 0)
>    %2 = shufflevector <4 x i1> %res, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
> @@ -182,8 +296,17 @@ define i8 @stack_fold_cmpps(<4 x float>
>  declare <4 x i1> @llvm.x86.avx512.cmp.ps.128(<4 x float>, <4 x float>, i32)
>
>  define i8 @stack_fold_cmpps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpps_ymm
> -  ;CHECK:       vcmpeqps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%k[0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcmpeqps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %k0 # 32-byte Folded Reload
> +; CHECK-NEXT:    kmovw %k0, %eax
> +; CHECK-NEXT:    # kill: def $al killed $al killed $eax
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %res = call <8 x i1> @llvm.x86.avx512.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i32 0)
>    %2 = bitcast <8 x i1> %res to i8
> @@ -192,40 +315,70 @@ define i8 @stack_fold_cmpps_ymm(<8 x flo
>  declare <8 x i1> @llvm.x86.avx512.cmp.ps.256(<8 x float>, <8 x float>, i32)
>
>  define <2 x double> @stack_fold_divpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_divpd
> -  ;CHECK:       vdivpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fdiv <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_divpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_divpd_ymm
> -  ;CHECK:       vdivpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fdiv <4 x double> %a0, %a1
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_divps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_divps
> -  ;CHECK:       vdivps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fdiv <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_divps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_divps_ymm
> -  ;CHECK:       vdivps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vdivps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fdiv <8 x float> %a0, %a1
>    ret <8 x float> %2
>  }
>
>  define <2 x double> @stack_fold_cvtdq2pd(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2pd
> -  ;CHECK:   vcvtdq2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtdq2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
>    %3 = sitofp <2 x i32> %2 to <2 x double>
> @@ -233,16 +386,28 @@ define <2 x double> @stack_fold_cvtdq2pd
>  }
>
>  define <4 x double> @stack_fold_cvtdq2pd_ymm(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2pd_ymm
> -  ;CHECK:   vcvtdq2pd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2pd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtdq2pd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = sitofp <4 x i32> %a0 to <4 x double>
>    ret <4 x double> %2
>  }
>
>  define <2 x double> @stack_fold_cvtudq2pd(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtudq2pd
> -  ;CHECK:   vcvtudq2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtudq2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtudq2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
>    %3 = uitofp <2 x i32> %2 to <2 x double>
> @@ -250,32 +415,57 @@ define <2 x double> @stack_fold_cvtudq2p
>  }
>
>  define <4 x double> @stack_fold_cvtudq2pd_ymm(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtudq2pd_ymm
> -  ;CHECK:   vcvtudq2pd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtudq2pd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtudq2pd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = uitofp <4 x i32> %a0 to <4 x double>
>    ret <4 x double> %2
>  }
>
>  define <2 x float> @stack_fold_cvtpd2ps(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtpd2ps
> -  ;CHECK:   vcvtpd2psx {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtpd2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtpd2psx {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fptrunc <2 x double> %a0 to <2 x float>
>    ret <2 x float> %2
>  }
>
>  define <4 x float> @stack_fold_cvtpd2ps_ymm(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtpd2ps_ymm
> -  ;CHECK:   vcvtpd2psy {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtpd2ps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vcvtpd2psy {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fptrunc <4 x double> %a0 to <4 x float>
>    ret <4 x float> %2
>  }
>
>  define <2 x double> @stack_fold_maxpd(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxpd
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -283,16 +473,28 @@ define <2 x double> @stack_fold_maxpd(<2
>  declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <2 x double> @stack_fold_maxpd_commutable(<2 x double> %a0, <2 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxpd_commutable
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
>  }
>
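(The #0/#1 attribute groups that separate e.g. stack_fold_maxpd from stack_fold_maxpd_commutable are defined at the bottom of the file, outside every hunk shown. If memory serves they toggle "unsafe-fp-math", which is what licenses commuting the min/max operands; roughly:

attributes #0 = { "unsafe-fp-math"="false" }
attributes #1 = { "unsafe-fp-math"="true" }

Worth noting that with the second operand folded from the stack, the regenerated CHECK bodies come out identical for both variants anyway.)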
>  define <4 x double> @stack_fold_maxpd_ymm(<4 x double> %a0, <4 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxpd_ymm
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret <4 x double> %2
> @@ -300,16 +502,28 @@ define <4 x double> @stack_fold_maxpd_ym
>  declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwind readnone
>
>  define <4 x double> @stack_fold_maxpd_ymm_commutable(<4 x double> %a0, <4 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxpd_ymm_commutable
> -  ;CHECK:       vmaxpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_ymm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1)
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_maxps(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxps
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -317,16 +531,28 @@ define <4 x float> @stack_fold_maxps(<4
>  declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <4 x float> @stack_fold_maxps_commutable(<4 x float> %a0, <4 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxps_commutable
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_maxps_ymm(<8 x float> %a0, <8 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxps_ymm
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
> @@ -334,16 +560,28 @@ define <8 x float> @stack_fold_maxps_ymm
>  declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind readnone
>
>  define <8 x float> @stack_fold_maxps_ymm_commutable(<8 x float> %a0, <8 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxps_ymm_commutable
> -  ;CHECK:       vmaxps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_ymm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmaxps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
>  }
>
>  define <4 x float> @stack_fold_minps(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minps
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -351,16 +589,28 @@ define <4 x float> @stack_fold_minps(<4
>  declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <4 x float> @stack_fold_minps_commutable(<4 x float> %a0, <4 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minps_commutable
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_minps_ymm(<8 x float> %a0, <8 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minps_ymm
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
> @@ -368,48 +618,86 @@ define <8 x float> @stack_fold_minps_ymm
>  declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind readnone
>
>  define <8 x float> @stack_fold_minps_ymm_commutable(<8 x float> %a0, <8 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minps_ymm_commutable
> -  ;CHECK:       vminps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_ymm_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vminps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1)
>    ret <8 x float> %2
>  }
>
>  define <2 x double> @stack_fold_mulpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulpd
> -  ;CHECK:       vmulpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_mulpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulpd_ymm
> -  ;CHECK:       vmulpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <4 x double> %a0, %a1
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_mulps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulps
> -  ;CHECK:       vmulps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_mulps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulps_ymm
> -  ;CHECK:       vmulps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmulps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fmul <8 x float> %a0, %a1
>    ret <8 x float> %2
>  }
>
>  define <2 x double> @stack_fold_orpd(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_orpd
> -  ;CHECK:       vorpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -421,8 +709,16 @@ define <2 x double> @stack_fold_orpd(<2
>  }
>
>  define <4 x double> @stack_fold_orpd_ymm(<4 x double> %a0, <4 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_orpd_ymm
> -  ;CHECK:       vorpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <4 x double> %a0 to <4 x i64>
>    %3 = bitcast <4 x double> %a1 to <4 x i64>
> @@ -434,8 +730,16 @@ define <4 x double> @stack_fold_orpd_ymm
>  }
>
>  define <4 x float> @stack_fold_orps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_orps
> -  ;CHECK:       vorps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <4 x i32>
>    %3 = bitcast <4 x float> %a1 to <4 x i32>
> @@ -447,8 +751,16 @@ define <4 x float> @stack_fold_orps(<4 x
>  }
>
>  define <8 x float> @stack_fold_orps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_orps_ymm
> -  ;CHECK:       vorps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vorps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <8 x float> %a0 to <8 x i32>
>    %3 = bitcast <8 x float> %a1 to <8 x i32>
> @@ -460,8 +772,18 @@ define <8 x float> @stack_fold_orps_ymm(
>  }
>
>  define <4 x double> @stack_fold_shuff64x2_maskz(<4 x double> %a, <4 x double> %b, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_shuff64x2_maskz
> -  ;CHECK:   vshuff64x2 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shuff64x2_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
> +; CHECK-NEXT:    vshuff64x2 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 {%k1} {z} # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 {%k1} {z} = ymm0[2,3],mem[0,1]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -471,8 +793,18 @@ define <4 x double> @stack_fold_shuff64x
>  }
>
>  define <8 x float> @stack_fold_shuff32x4_maskz(<8 x float> %a, <8 x float> %b, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_shuff32x4_maskz
> -  ;CHECK:   vshuff32x4 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[1-7]}}} {z} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shuff32x4_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
> +; CHECK-NEXT:    vshuff32x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 {%k1} {z} # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 {%k1} {z} = ymm0[4,5,6,7],mem[0,1,2,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -481,16 +813,33 @@ define <8 x float> @stack_fold_shuff32x4
>  }
>
>  define <4 x float> @stack_fold_shufps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_shufps
> -  ;CHECK:       vshufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vshufps $200, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0,2],mem[0,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
>    ret <4 x float> %2
>  }
>
>  define <4 x float> @stack_fold_shufps_mask(<4 x float>* %passthru, <4 x float> %a0, <4 x float> %a1, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_shufps_mask
> -  ;CHECK:       vshufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufps_mask:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %esi, %k1
> +; CHECK-NEXT:    vmovaps (%rdi), %xmm2
> +; CHECK-NEXT:    vshufps $200, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 {%k1} # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm2 {%k1} = xmm0[0,2],mem[0,3]
> +; CHECK-NEXT:    vmovaps %xmm2, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -501,8 +850,16 @@ define <4 x float> @stack_fold_shufps_ma
>  }
>
>  define <4 x float> @stack_fold_shufps_maskz(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_shufps_maskz
> -  ;CHECK:       vshufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufps_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vshufps $200, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 {%k1} {z} # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 {%k1} {z} = xmm0[0,2],mem[0,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
>    %3 = bitcast i8 %mask to <8 x i1>
> @@ -512,48 +869,87 @@ define <4 x float> @stack_fold_shufps_ma
>  }
>
>  define <8 x float> @stack_fold_shufps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_shufps_ymm
> -  ;CHECK:       vshufps $148, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vshufps $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0,1],mem[1,2],ymm0[4,5],mem[5,6]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 9, i32 10, i32 4, i32 5, i32 13, i32 14>
>    ret <8 x float> %2
>  }
>
>  define <2 x double> @stack_fold_subpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_subpd
> -  ;CHECK:       vsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fsub <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_subpd_ymm(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_subpd_ymm
> -  ;CHECK:       vsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fsub <4 x double> %a0, %a1
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_subps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_subps
> -  ;CHECK:       vsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fsub <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_subps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_subps_ymm
> -  ;CHECK:       vsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vsubps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = fsub <8 x float> %a0, %a1
>    ret <8 x float> %2
>  }
>
>  define <2 x double> @stack_fold_xorpd(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_xorpd
> -  ;CHECK:       vxorpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -565,8 +961,16 @@ define <2 x double> @stack_fold_xorpd(<2
>  }
>
>  define <4 x double> @stack_fold_xorpd_ymm(<4 x double> %a0, <4 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_xorpd_ymm
> -  ;CHECK:       vxorpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <4 x double> %a0 to <4 x i64>
>    %3 = bitcast <4 x double> %a1 to <4 x i64>
> @@ -578,8 +982,16 @@ define <4 x double> @stack_fold_xorpd_ym
>  }
>
>  define <4 x float> @stack_fold_xorps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_xorps
> -  ;CHECK:       vxorps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <4 x i32>
>    %3 = bitcast <4 x float> %a1 to <4 x i32>
> @@ -591,8 +1003,16 @@ define <4 x float> @stack_fold_xorps(<4
>  }
>
>  define <8 x float> @stack_fold_xorps_ymm(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_xorps_ymm
> -  ;CHECK:       vxorps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vxorps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = bitcast <8 x float> %a0 to <8 x i32>
>    %3 = bitcast <8 x float> %a1 to <8 x i32>
> @@ -604,104 +1024,189 @@ define <8 x float> @stack_fold_xorps_ymm
>  }
>
>  define <4 x float> @stack_fold_extractf32x4(<8 x float> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_extractf32x4
> -  ;CHECK:       vextractf128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
> +; CHECK-LABEL: stack_fold_extractf32x4:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vextractf128 $1, %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
>    %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ret <4 x float> %1
>  }
>
>  define <2 x double> @stack_fold_extractf64x2(<4 x double> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_extractf64x2
> -  ;CHECK:       vextractf128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
> +; CHECK-LABEL: stack_fold_extractf64x2:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vextractf128 $1, %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = shufflevector <4 x double> %a0, <4 x double> %a1, <2 x i32> <i32 2, i32 3>
>    %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    ret <2 x double> %1
>  }
>
>  define <8 x float> @stack_fold_insertf32x4(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_insertf32x4
> -  ;CHECK:       vinsertf128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_insertf32x4:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
>    ret <8 x float> %2
>  }
>
>  define <4 x double> @stack_fold_insertf64x2(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_insertf64x2
> -  ;CHECK:       vinsertf128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_insertf64x2:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
>    ret <4 x double> %2
>  }
>
>  define <4 x float> @stack_fold_vpermt2ps(<4 x float> %x0, <4 x i32> %x1, <4 x float> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermt2ps
> -  ;CHECK:       vpermt2ps {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermt2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermt2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.avx512.vpermi2var.ps.128(<4 x float> %x0, <4 x i32> %x1, <4 x float> %x2)
>    ret <4 x float> %2
>  }
>
>  define <4 x float> @stack_fold_vpermi2ps(<4 x i32> %x0, <4 x float> %x1, <4 x float> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermi2ps
> -  ;CHECK:       vpermi2ps {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermi2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermi2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.avx512.vpermi2var.ps.128(<4 x float> %x1, <4 x i32> %x0, <4 x float> %x2)
>    ret <4 x float> %2
>  }
>
>  define <2 x double> @stack_fold_vpermt2pd(<2 x double> %x0, <2 x i64> %x1, <2 x double> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermt2pd
> -  ;CHECK:       vpermt2pd {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermt2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermt2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.avx512.vpermi2var.pd.128(<2 x double> %x0, <2 x i64> %x1, <2 x double> %x2)
>    ret <2 x double> %2
>  }
>
>  define <2 x double> @stack_fold_vpermi2pd(<2 x i64> %x0, <2 x double> %x1, <2 x double> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermi2pd
> -  ;CHECK:       vpermi2pd {{-?[0-9]*}}(%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermi2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermi2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.avx512.vpermi2var.pd.128(<2 x double> %x1, <2 x i64> %x0, <2 x double> %x2)
>    ret <2 x double> %2
>  }
>
>  define <8 x float> @stack_fold_vpermt2ps_ymm(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermt2ps_ymm
> -  ;CHECK:       vpermt2ps {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermt2ps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermt2ps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx512.vpermi2var.ps.256(<8 x float> %x0, <8 x i32> %x1, <8 x float> %x2)
>    ret <8 x float> %2
>  }
>
>  define <8 x float> @stack_fold_vpermi2ps_ymm(<8 x i32> %x0, <8 x float> %x1, <8 x float> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermi2ps_ymm
> -  ;CHECK:       vpermi2ps {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermi2ps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermi2ps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx512.vpermi2var.ps.256(<8 x float> %x1, <8 x i32> %x0, <8 x float> %x2)
>    ret <8 x float> %2
>  }
>
>  define <4 x double> @stack_fold_vpermt2pd_ymm(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermt2pd_ymm
> -  ;CHECK:       vpermt2pd {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermt2pd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermt2pd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx512.vpermi2var.pd.256(<4 x double> %x0, <4 x i64> %x1, <4 x double> %x2)
>    ret <4 x double> %2
>  }
>
>  define <4 x double> @stack_fold_vpermi2pd_ymm(<4 x i64> %x0, <4 x double> %x1, <4 x double> %x2) {
> -  ;CHECK-LABEL: stack_fold_vpermi2pd_ymm
> -  ;CHECK:       vpermi2pd {{-?[0-9]*}}(%rsp), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpermi2pd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermi2pd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <4 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx512.vpermi2var.pd.256(<4 x double> %x1, <4 x i64> %x0, <4 x double> %x2)
>    ret <4 x double> %2
>  }
>
>  define <4 x double> @stack_fold_permpd(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_permpd
> -  ;CHECK:   vpermpd $235, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermpd $235, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[3,2,2,3]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 3>
>    ; fadd forces execution domain
> @@ -710,8 +1215,18 @@ define <4 x double> @stack_fold_permpd(<
>  }
>
>  define <4 x double> @stack_fold_permpdvar(<4 x i64> %a0, <4 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_permpdvar
> -  ;CHECK:   vpermpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permpdvar:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
> +; CHECK-NEXT:    vpermpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx512.permvar.df.256(<4 x double> %a1, <4 x i64> %a0)
>    ; fadd forces execution domain
> @@ -721,8 +1236,14 @@ define <4 x double> @stack_fold_permpdva
>  declare <4 x double> @llvm.x86.avx512.permvar.df.256(<4 x double>, <4 x i64>) nounwind readonly
>
>  define <8 x float> @stack_fold_permps(<8 x i32> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_permps
> -  ;CHECK:       vpermps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a1, <8 x i32> %a0)
>    ret <8 x float> %2
> @@ -730,24 +1251,44 @@ define <8 x float> @stack_fold_permps(<8
>  declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind readonly
>
>  define <2 x double> @stack_fold_permilpd(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilpd
> -  ;CHECK:   vpermilpd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[1,0]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 1, i32 0>
>    ret <2 x double> %2
>  }
>
>  define <4 x double> @stack_fold_permilpd_ymm(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilpd_ymm
> -  ;CHECK:   vpermilpd $5, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd $5, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[1,0,3,2]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
>    ret <4 x double> %2
>  }
>
>  define <2 x double> @stack_fold_permilpdvar(<2 x double> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpdvar
> -  ;CHECK:       vpermilpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpdvar:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1)
>    ret <2 x double> %2
> @@ -755,8 +1296,14 @@ define <2 x double> @stack_fold_permilpd
>  declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwind readnone
>
>  define <4 x double> @stack_fold_permilpdvar_ymm(<4 x double> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpdvar_ymm
> -  ;CHECK:       vpermilpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpdvar_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1)
>    ret <4 x double> %2
> @@ -764,24 +1311,44 @@ define <4 x double> @stack_fold_permilpd
>  declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>) nounwind readnone
>
>  define <4 x float> @stack_fold_permilps(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilps
> -  ;CHECK:   vpermilps $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps $27, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[3,2,1,0]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
>    ret <4 x float> %2
>  }
>
>  define <8 x float> @stack_fold_permilps_ymm(<8 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_permilps_ymm
> -  ;CHECK:   vpermilps $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilps_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps $27, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[3,2,1,0,7,6,5,4]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
>    ret <8 x float> %2
>  }
>
>  define <4 x float> @stack_fold_permilpsvar(<4 x float> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpsvar
> -  ;CHECK:       vpermilps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpsvar:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1)
>    ret <4 x float> %2
> @@ -789,8 +1356,14 @@ define <4 x float> @stack_fold_permilpsv
>  declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind readnone
>
>  define <8 x float> @stack_fold_permilpsvar_ymm(<8 x float> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_permilpsvar_ymm
> -  ;CHECK:       vpermilps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpsvar_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermilps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1)
>    ret <8 x float> %2
> @@ -798,8 +1371,15 @@ define <8 x float> @stack_fold_permilpsv
>  declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) nounwind readnone
>
>  define <8 x float> @stack_fold_permilpsvar_ymm_maskz(<8 x float> %a0, <8 x i32> %a1, i8 %mask) {
> -  ;CHECK-LABEL: stack_fold_permilpsvar_ymm_maskz
> -  ;CHECK:       vpermilps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permilpsvar_ymm_maskz:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    kmovw %edi, %k1
> +; CHECK-NEXT:    vpermilps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 {%k1} {z} # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1)
>    %3 = bitcast i8 %mask to <8 x i1>
>
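(For anyone skimming the regenerated CHECK lines: every one of these tests follows the same recipe. An inline asm "nop" clobbers all but a couple of xmm registers, so the allocator cannot keep both inputs and the asm result live at once and has to spill one input to the stack; the test then expects the arithmetic instruction to fold the reload straight from the stack slot instead of reloading into a register first. A minimal sketch of the pattern for the SSE case -- the function name here is made up for illustration, but the asm constraint string is the one the tests use:

  define <4 x float> @stack_fold_example(<4 x float> %a0, <4 x float> %a1) {
    ; Clobbering xmm2-xmm15 leaves only xmm0/xmm1 free; with %a0, %a1 and the
    ; asm result all live across the asm block, one input must be spilled.
    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    ; The reload of the spilled input should be folded into the add, i.e. the
    ; CHECK lines expect "addps <offset>(%rsp), %xmm0 # 16-byte Folded Reload".
    %2 = fadd <4 x float> %a0, %a1
    ret <4 x float> %2
  }

The update script just replaces the old hand-written one-line CHECKs with the full expected asm body, which is why the diff is so large despite being NFC.)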
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-fp-sse42.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -verify-machineinstrs -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -9,32 +10,56 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
>  define <2 x double> @stack_fold_addpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addpd
> -  ;CHECK:       addpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    addpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x float> @stack_fold_addps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addps
> -  ;CHECK:       addps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    addps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define double @stack_fold_addsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_addsd
> -  ;CHECK:       addsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    addsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_addsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addsd_int
> -  ;CHECK:       addsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    addsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -44,16 +69,28 @@ define <2 x double> @stack_fold_addsd_in
>  }
>
>  define float @stack_fold_addss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_addss
> -  ;CHECK:       addss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fadd float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_addss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addss_int
> -  ;CHECK:       addss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -63,8 +100,14 @@ define <4 x float> @stack_fold_addss_int
>  }
>
>  define <2 x double> @stack_fold_addsubpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_addsubpd
> -  ;CHECK:       addsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsubpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    addsubpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -72,8 +115,14 @@ define <2 x double> @stack_fold_addsubpd
>  declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <4 x float> @stack_fold_addsubps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_addsubps
> -  ;CHECK:       addsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_addsubps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    addsubps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -81,8 +130,16 @@ define <4 x float> @stack_fold_addsubps(
>  declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_andnpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnpd
> -  ;CHECK:       andnpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    andnpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    xorpd %xmm1, %xmm1
> +; CHECK-NEXT:    addpd %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -95,8 +152,16 @@ define <2 x double> @stack_fold_andnpd(<
>  }
>
>  define <4 x float> @stack_fold_andnps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andnps
> -  ;CHECK:       andnps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andnps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    andnps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    xorps %xmm1, %xmm1
> +; CHECK-NEXT:    addps %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <2 x i64>
>    %3 = bitcast <4 x float> %a1 to <2 x i64>
> @@ -109,8 +174,16 @@ define <4 x float> @stack_fold_andnps(<4
>  }
>
>  define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_andpd
> -  ;CHECK:       andpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    andpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    xorpd %xmm1, %xmm1
> +; CHECK-NEXT:    addpd %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -122,8 +195,16 @@ define <2 x double> @stack_fold_andpd(<2
>  }
>
>  define <4 x float> @stack_fold_andps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_andps
> -  ;CHECK:       andps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_andps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    andps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    xorps %xmm1, %xmm1
> +; CHECK-NEXT:    addps %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <2 x i64>
>    %3 = bitcast <4 x float> %a1 to <2 x i64>
> @@ -135,8 +216,17 @@ define <4 x float> @stack_fold_andps(<4
>  }
>
>  define <2 x double> @stack_fold_blendpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_blendpd
> -  ;CHECK:       blendpd $2, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[1]
> +; CHECK-NEXT:    xorpd %xmm1, %xmm1
> +; CHECK-NEXT:    addpd %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = select <2 x i1> <i1 1, i1 0>, <2 x double> %a0, <2 x double> %a1
>    ; fadd forces execution domain
> @@ -145,8 +235,17 @@ define <2 x double> @stack_fold_blendpd(
>  }
>
>  define <4 x float> @stack_fold_blendps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_blendps
> -  ;CHECK:       blendps $6, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blendps $6, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[1,2],xmm0[3]
> +; CHECK-NEXT:    xorps %xmm1, %xmm1
> +; CHECK-NEXT:    addps %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = select <4 x i1> <i1 1, i1 0, i1 0, i1 1>, <4 x float> %a0, <4 x float> %a1
>    ; fadd forces execution domain
> @@ -155,8 +254,16 @@ define <4 x float> @stack_fold_blendps(<
>  }
>
>  define <2 x double> @stack_fold_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %c) {
> -  ;CHECK-LABEL: stack_fold_blendvpd
> -  ;CHECK:       blendvpd %xmm0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendvpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    movapd %xmm1, %xmm2
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blendvpd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
> +; CHECK-NEXT:    movapd %xmm2, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a1, <2 x double> %c, <2 x double> %a0)
>    ret <2 x double> %2
> @@ -164,8 +271,16 @@ define <2 x double> @stack_fold_blendvpd
>  declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
>
>  define <4 x float> @stack_fold_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %c) {
> -  ;CHECK-LABEL: stack_fold_blendvps
> -  ;CHECK:       blendvps %xmm0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_blendvps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    movaps %xmm1, %xmm2
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    blendvps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
> +; CHECK-NEXT:    movaps %xmm2, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a1, <4 x float> %c, <4 x float> %a0)
>    ret <4 x float> %2
> @@ -173,8 +288,14 @@ define <4 x float> @stack_fold_blendvps(
>  declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_cmppd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmppd
> -  ;CHECK:       cmpeqpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmppd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cmpeqpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 0)
>    ret <2 x double> %2
> @@ -182,8 +303,14 @@ define <2 x double> @stack_fold_cmppd(<2
>  declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounwind readnone
>
>  define <4 x float> @stack_fold_cmpps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpps
> -  ;CHECK:       cmpeqps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cmpeqps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 0)
>    ret <4 x float> %2
> @@ -191,8 +318,17 @@ define <4 x float> @stack_fold_cmpps(<4
>  declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
>
>  define i32 @stack_fold_cmpsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpsd
> -  ;CHECK:       cmpeqsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cmpeqsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    movq %xmm0, %rax
> +; CHECK-NEXT:    andl $1, %eax
> +; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp oeq double %a0, %a1
>    %3 = zext i1 %2 to i32
> @@ -200,8 +336,14 @@ define i32 @stack_fold_cmpsd(double %a0,
>  }
>
>  define <2 x double> @stack_fold_cmpsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpsd_int
> -  ;CHECK:       cmpeqsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cmpeqsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 0)
>    ret <2 x double> %2
> @@ -209,8 +351,16 @@ define <2 x double> @stack_fold_cmpsd_in
>  declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone
>
>  define i32 @stack_fold_cmpss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpss
> -  ;CHECK:       cmpeqss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cmpeqss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    movd %xmm0, %eax
> +; CHECK-NEXT:    andl $1, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp oeq float %a0, %a1
>    %3 = zext i1 %2 to i32
> @@ -218,8 +368,14 @@ define i32 @stack_fold_cmpss(float %a0,
>  }
>
>  define <4 x float> @stack_fold_cmpss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_cmpss_int
> -  ;CHECK:       cmpeqss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cmpss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cmpeqss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 0)
>    ret <4 x float> %2
> @@ -229,8 +385,18 @@ declare <4 x float> @llvm.x86.sse.cmp.ss
>  ; TODO stack_fold_comisd
>
>  define i32 @stack_fold_comisd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_comisd_int
> -  ;CHECK:       comisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_comisd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    comisd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setnp %al
> +; CHECK-NEXT:    sete %cl
> +; CHECK-NEXT:    andb %al, %cl
> +; CHECK-NEXT:    movzbl %cl, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
>    ret i32 %2
> @@ -240,8 +406,18 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2
>  ; TODO stack_fold_comiss
>
>  define i32 @stack_fold_comiss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_comiss_int
> -  ;CHECK:       comiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_comiss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    comiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setnp %al
> +; CHECK-NEXT:    sete %cl
> +; CHECK-NEXT:    andb %al, %cl
> +; CHECK-NEXT:    movzbl %cl, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1)
>    ret i32 %2
> @@ -249,8 +425,14 @@ define i32 @stack_fold_comiss_int(<4 x f
>  declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_cvtdq2pd(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2pd
> -  ;CHECK:       cvtdq2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtdq2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
>    %3 = sitofp <2 x i32> %2 to <2 x double>
> @@ -258,8 +440,14 @@ define <2 x double> @stack_fold_cvtdq2pd
>  }
>
>  define <2 x double> @stack_fold_cvtdq2pd_int(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2pd_int
> -  ;CHECK:       cvtdq2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2pd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtdq2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> %a0, <2 x i32> <i32 0, i32 1>
>    %cvt = sitofp <2 x i32> %2 to <2 x double>
> @@ -267,16 +455,28 @@ define <2 x double> @stack_fold_cvtdq2pd
>  }
>
>  define <4 x float> @stack_fold_cvtdq2ps(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtdq2ps
> -  ;CHECK:       cvtdq2ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtdq2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtdq2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sitofp <4 x i32> %a0 to <4 x float>
>    ret <4 x float> %2
>  }
>
>  define <4 x i32> @stack_fold_cvtpd2dq(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtpd2dq
> -  ;CHECK:       cvtpd2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtpd2dq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtpd2dq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
>    ret <4 x i32> %2
> @@ -284,16 +484,28 @@ define <4 x i32> @stack_fold_cvtpd2dq(<2
>  declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
>
>  define <2 x float> @stack_fold_cvtpd2ps(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtpd2ps
> -  ;CHECK:       cvtpd2ps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtpd2ps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtpd2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptrunc <2 x double> %a0 to <2 x float>
>    ret <2 x float> %2
>  }
>
>  define <4 x i32> @stack_fold_cvtps2dq(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2dq
> -  ;CHECK:       cvtps2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtps2dq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtps2dq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
>    ret <4 x i32> %2
> @@ -301,8 +513,14 @@ define <4 x i32> @stack_fold_cvtps2dq(<4
>  declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_cvtps2pd(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2pd
> -  ;CHECK:       cvtps2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtps2pd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtps2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <2 x i32> <i32 0, i32 1>
>    %3 = fpext <2 x float> %2 to <2 x double>
> @@ -310,8 +528,14 @@ define <2 x double> @stack_fold_cvtps2pd
>  }
>
>  define <2 x double> @stack_fold_cvtps2pd_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtps2pd_int
> -  ;CHECK:       cvtps2pd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtps2pd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtps2pd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a0, <2 x i32> <i32 0, i32 1>
>    %cvtps2pd = fpext <2 x float> %2 to <2 x double>
> @@ -321,8 +545,14 @@ define <2 x double> @stack_fold_cvtps2pd
>  ; TODO stack_fold_cvtsd2si
>
>  define i32 @stack_fold_cvtsd2si_int(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsd2si_int
> -  ;CHECK:       cvtsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsd2si_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtsd2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0)
>    ret i32 %2
> @@ -332,8 +562,14 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x
>  ; TODO stack_fold_cvtsd2si64
>
>  define i64 @stack_fold_cvtsd2si64_int(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsd2si64_int
> -  ;CHECK:       cvtsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsd2si64_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtsd2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0)
>    ret i64 %2
> @@ -341,16 +577,31 @@ define i64 @stack_fold_cvtsd2si64_int(<2
>  declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
>
>  define float @stack_fold_cvtsd2ss(double %a0) minsize {
> -  ;CHECK-LABEL: stack_fold_cvtsd2ss
> -  ;CHECK:       cvtsd2ss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsd2ss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    cvtsd2ss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptrunc double %a0 to float
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_cvtsd2ss_int(<2 x double> %a0) optsize {
> -  ;CHECK-LABEL: stack_fold_cvtsd2ss_int
> -  ;CHECK:       cvtsd2ss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsd2ss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm1, %xmm1
> +; CHECK-NEXT:    cvtsd2ss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
> +; CHECK-NEXT:    movaps %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> <float 0x0, float 0x0, float 0x0, float 0x0>, <2 x double> %a0)
>    ret <4 x float> %2
> @@ -358,16 +609,89 @@ define <4 x float> @stack_fold_cvtsd2ss_
>  declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind readnone
>
>  define double @stack_fold_cvtsi2sd(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi2sd
> -  ;CHECK:       cvtsi2sdl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi2sd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    cvtsi2sdl {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i32 %a0 to double
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_cvtsi2sd_int(i32 %a0, <2 x double> %b0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi2sd_int
> -  ;CHECK:       cvtsi2sdl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi2sd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtsi2sdl {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i32 %a0 to double
>    %3 = insertelement <2 x double> %b0, double %2, i64 0
> @@ -375,16 +699,89 @@ define <2 x double> @stack_fold_cvtsi2sd
>  }
>
>  define double @stack_fold_cvtsi642sd(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi642sd
> -  ;CHECK:       cvtsi2sdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi642sd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    cvtsi2sdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i64 %a0 to double
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_cvtsi642sd_int(i64 %a0, <2 x double> %b0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi642sd_int
> -  ;CHECK:       cvtsi2sdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi642sd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtsi2sdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i64 %a0 to double
>    %3 = insertelement <2 x double> %b0, double %2, i64 0
> @@ -392,16 +789,89 @@ define <2 x double> @stack_fold_cvtsi642
>  }
>
>  define float @stack_fold_cvtsi2ss(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi2ss
> -  ;CHECK:       cvtsi2ssl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi2ss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    cvtsi2ssl {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i32 %a0 to float
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_cvtsi2ss_int(i32 %a0, <4 x float> %b0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi2ss_int
> -  ;CHECK:  cvtsi2ssl {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi2ss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtsi2ssl {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i32 %a0 to float
>    %3 = insertelement <4 x float> %b0, float %2, i64 0
> @@ -409,16 +879,89 @@ define <4 x float> @stack_fold_cvtsi2ss_
>  }
>
>  define float @stack_fold_cvtsi642ss(i64 %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi642ss
> -  ;CHECK:       cvtsi2ssq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi642ss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    cvtsi2ssq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i64 %a0 to float
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_cvtsi642ss_int(i64 %a0, <4 x float> %b0) {
> -  ;CHECK-LABEL: stack_fold_cvtsi642ss_int
> -  ;CHECK:  cvtsi2ssq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtsi642ss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtsi2ssq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = sitofp i64 %a0 to float
>    %3 = insertelement <4 x float> %b0, float %2, i64 0
> @@ -426,16 +969,31 @@ define <4 x float> @stack_fold_cvtsi642s
>  }
>
>  define double @stack_fold_cvtss2sd(float %a0) minsize {
> -  ;CHECK-LABEL: stack_fold_cvtss2sd
> -  ;CHECK:       cvtss2sd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtss2sd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    cvtss2sd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fpext float %a0 to double
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_cvtss2sd_int(<4 x float> %a0) optsize {
> -  ;CHECK-LABEL: stack_fold_cvtss2sd_int
> -  ;CHECK:       cvtss2sd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtss2sd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    cvtss2sd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i64 0
>    %3 = fpext float %2 to double
> @@ -446,8 +1004,14 @@ define <2 x double> @stack_fold_cvtss2sd
>  ; TODO stack_fold_cvtss2si
>
>  define i32 @stack_fold_cvtss2si_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtss2si_int
> -  ;CHECK:       cvtss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtss2si_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtss2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0)
>    ret i32 %2
> @@ -457,8 +1021,14 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x
>  ; TODO stack_fold_cvtss2si64
>
>  define i64 @stack_fold_cvtss2si64_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvtss2si64_int
> -  ;CHECK:       cvtss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvtss2si64_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvtss2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0)
>    ret i64 %2
> @@ -466,8 +1036,14 @@ define i64 @stack_fold_cvtss2si64_int(<4
>  declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
>
>  define <4 x i32> @stack_fold_cvttpd2dq(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttpd2dq
> -  ;CHECK:       cvttpd2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttpd2dq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttpd2dq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
>    ret <4 x i32> %2
> @@ -475,24 +1051,42 @@ define <4 x i32> @stack_fold_cvttpd2dq(<
>  declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
>
>  define <4 x i32> @stack_fold_cvttps2dq(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttps2dq
> -  ;CHECK:       cvttps2dq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttps2dq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttps2dq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi <4 x float> %a0 to <4 x i32>
>    ret <4 x i32> %2
>  }
>
>  define i32 @stack_fold_cvttsd2si(double %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttsd2si
> -  ;CHECK:       cvttsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttsd2si:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttsd2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi double %a0 to i32
>    ret i32 %2
>  }
>
>  define i32 @stack_fold_cvttsd2si_int(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttsd2si_int
> -  ;CHECK:       cvttsd2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttsd2si_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttsd2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0)
>    ret i32 %2
> @@ -500,16 +1094,28 @@ define i32 @stack_fold_cvttsd2si_int(<2
>  declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
>
>  define i64 @stack_fold_cvttsd2si64(double %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttsd2si64
> -  ;CHECK:       cvttsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttsd2si64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttsd2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi double %a0 to i64
>    ret i64 %2
>  }
>
>  define i64 @stack_fold_cvttsd2si64_int(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttsd2si64_int
> -  ;CHECK:       cvttsd2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttsd2si64_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttsd2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0)
>    ret i64 %2
> @@ -517,16 +1123,28 @@ define i64 @stack_fold_cvttsd2si64_int(<
>  declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
>
>  define i32 @stack_fold_cvttss2si(float %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttss2si
> -  ;CHECK:       cvttss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttss2si:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttss2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi float %a0 to i32
>    ret i32 %2
>  }
>
>  define i32 @stack_fold_cvttss2si_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttss2si_int
> -  ;CHECK:       cvttss2si {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttss2si_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttss2si {{[-0-9]+}}(%r{{[sb]}}p), %eax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0)
>    ret i32 %2
> @@ -534,16 +1152,28 @@ define i32 @stack_fold_cvttss2si_int(<4
>  declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
>
>  define i64 @stack_fold_cvttss2si64(float %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttss2si64
> -  ;CHECK:       cvttss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttss2si64:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttss2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fptosi float %a0 to i64
>    ret i64 %2
>  }
>
>  define i64 @stack_fold_cvttss2si64_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_cvttss2si64_int
> -  ;CHECK:       cvttss2si {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_cvttss2si64_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    cvttss2si {{[-0-9]+}}(%r{{[sb]}}p), %rax # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %a0)
>    ret i64 %2
> @@ -551,32 +1181,56 @@ define i64 @stack_fold_cvttss2si64_int(<
>  declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_divpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_divpd
> -  ;CHECK:       divpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    divpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x float> @stack_fold_divps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_divps
> -  ;CHECK:       divps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    divps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define double @stack_fold_divsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_divsd
> -  ;CHECK:       divsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    divsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_divsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_divsd_int
> -  ;CHECK:       divsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    divsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -587,16 +1241,28 @@ define <2 x double> @stack_fold_divsd_in
>  declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind readnone
>
>  define float @stack_fold_divss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_divss
> -  ;CHECK:       divss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    divss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fdiv float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_divss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_divss_int
> -  ;CHECK:       divss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_divss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    divss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -607,8 +1273,14 @@ define <4 x float> @stack_fold_divss_int
>  declare <4 x float> @llvm.x86.sse.div.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_dppd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_dppd
> -  ;CHECK:       dppd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_dppd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    dppd $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7)
>    ret <2 x double> %2
> @@ -616,8 +1288,14 @@ define <2 x double> @stack_fold_dppd(<2
>  declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwind readnone
>
>  define <4 x float> @stack_fold_dpps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_dpps
> -  ;CHECK:       dpps $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_dpps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    dpps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7)
>    ret <4 x float> %2
> @@ -625,9 +1303,45 @@ define <4 x float> @stack_fold_dpps(<4 x
>  declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind readnone
>
>  define i32 @stack_fold_extractps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_extractps
> -  ;CHECK:       extractps $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
> -  ;CHECK:       movl    {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
> +; CHECK-LABEL: stack_fold_extractps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    addps %xmm1, %xmm0
> +; CHECK-NEXT:    extractps $1, %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    ; fadd forces execution domain
>    %1 = fadd <4 x float> %a0, %a1
>    %2 = extractelement <4 x float> %1, i32 1
> @@ -637,8 +1351,14 @@ define i32 @stack_fold_extractps(<4 x fl
>  }
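
(The push/pop storm in this one is expected rather than a regression: the extracted i32 has to survive in memory across the asm, so this test's clobber list, which the hunk above cuts off, presumably covers the general-purpose registers too, along the lines of:

  %4 = tail call i32 asm sideeffect "nop", "=r,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"() ; hypothetical reconstruction, the real clobber string is not quoted in this hunk

Clobbering the callee-saved GPRs is what produces the rbx/rbp/r12-r15 saves, and it guarantees the value reaches %eax through a 4-byte reload instead of a register copy.)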
>
>  define <2 x double> @stack_fold_haddpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_haddpd
> -  ;CHECK:       haddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_haddpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    haddpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -646,8 +1366,14 @@ define <2 x double> @stack_fold_haddpd(<
>  declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <4 x float> @stack_fold_haddps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_haddps
> -  ;CHECK:       haddps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_haddps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    haddps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -655,8 +1381,14 @@ define <4 x float> @stack_fold_haddps(<4
>  declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_hsubpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_hsubpd
> -  ;CHECK:       hsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_hsubpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    hsubpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -664,8 +1396,14 @@ define <2 x double> @stack_fold_hsubpd(<
>  declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <4 x float> @stack_fold_hsubps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_hsubps
> -  ;CHECK:       hsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_hsubps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    hsubps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -673,9 +1411,15 @@ define <4 x float> @stack_fold_hsubps(<4
>  declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <4 x float> @stack_fold_insertps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_insertps
> -  ;CHECK:       insertps $17, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> -  ;CHECK-NEXT:                                                        {{.*#+}} xmm0 = zero,mem[0],xmm0[2,3]
> +; CHECK-LABEL: stack_fold_insertps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    insertps $17, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = zero,mem[0],xmm0[2,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 209)
>    ret <4 x float> %2
> @@ -683,8 +1427,14 @@ define <4 x float> @stack_fold_insertps(
>  declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone
>
>  define <2 x double> @stack_fold_maxpd(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxpd
> -  ;CHECK:       maxpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -692,16 +1442,28 @@ define <2 x double> @stack_fold_maxpd(<2
>  declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <2 x double> @stack_fold_maxpd_commutable(<2 x double> %a0, <2 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxpd_commutable
> -  ;CHECK:       maxpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxpd_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
>  }
>
>  define <4 x float> @stack_fold_maxps(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxps
> -  ;CHECK:       maxps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -709,16 +1471,28 @@ define <4 x float> @stack_fold_maxps(<4
>  declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <4 x float> @stack_fold_maxps_commutable(<4 x float> %a0, <4 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxps_commutable
> -  ;CHECK:       maxps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxps_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
>  }
>
>  define double @stack_fold_maxsd(double %a0, double %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxsd
> -  ;CHECK:       maxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ogt double %a0, %a1
>    %3 = select i1 %2, double %a0, double %a1
> @@ -726,8 +1500,14 @@ define double @stack_fold_maxsd(double %
>  }
>
>  define double @stack_fold_maxsd_commutable(double %a0, double %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxsd_commutable
> -  ;CHECK:       maxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxsd_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ogt double %a0, %a1
>    %3 = select i1 %2, double %a0, double %a1
> @@ -735,8 +1515,14 @@ define double @stack_fold_maxsd_commutab
>  }
>
>  define <2 x double> @stack_fold_maxsd_int(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxsd_int
> -  ;CHECK:       maxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -744,8 +1530,14 @@ define <2 x double> @stack_fold_maxsd_in
>  declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone
>
>  define float @stack_fold_maxss(float %a0, float %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxss
> -  ;CHECK:       maxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ogt float %a0, %a1
>    %3 = select i1 %2, float %a0, float %a1
> @@ -753,8 +1545,14 @@ define float @stack_fold_maxss(float %a0
>  }
>
>  define float @stack_fold_maxss_commutable(float %a0, float %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_maxss_commutable
> -  ;CHECK:       maxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxss_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ogt float %a0, %a1
>    %3 = select i1 %2, float %a0, float %a1
> @@ -762,8 +1560,14 @@ define float @stack_fold_maxss_commutabl
>  }
>
>  define <4 x float> @stack_fold_maxss_int(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_maxss_int
> -  ;CHECK:       maxss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_maxss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    maxss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -771,8 +1575,14 @@ define <4 x float> @stack_fold_maxss_int
>  declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_minpd(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minpd
> -  ;CHECK:       minpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -780,16 +1590,28 @@ define <2 x double> @stack_fold_minpd(<2
>  declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
>
>  define <2 x double> @stack_fold_minpd_commutable(<2 x double> %a0, <2 x double> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minpd_commutable
> -  ;CHECK:       minpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minpd_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
>  }
>
>  define <4 x float> @stack_fold_minps(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minps
> -  ;CHECK:       minps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -797,16 +1619,28 @@ define <4 x float> @stack_fold_minps(<4
>  declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
>
>  define <4 x float> @stack_fold_minps_commutable(<4 x float> %a0, <4 x float> %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minps_commutable
> -  ;CHECK:       minps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minps_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
>  }
>
>  define double @stack_fold_minsd(double %a0, double %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minsd
> -  ;CHECK:       minsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp olt double %a0, %a1
>    %3 = select i1 %2, double %a0, double %a1
> @@ -814,8 +1648,14 @@ define double @stack_fold_minsd(double %
>  }
>
>  define double @stack_fold_minsd_commutable(double %a0, double %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minsd_commutable
> -  ;CHECK:       minsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minsd_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp olt double %a0, %a1
>    %3 = select i1 %2, double %a0, double %a1
> @@ -823,8 +1663,14 @@ define double @stack_fold_minsd_commutab
>  }
>
>  define <2 x double> @stack_fold_minsd_int(<2 x double> %a0, <2 x double> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minsd_int
> -  ;CHECK:       minsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
>    ret <2 x double> %2
> @@ -832,8 +1678,14 @@ define <2 x double> @stack_fold_minsd_in
>  declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone
>
>  define float @stack_fold_minss(float %a0, float %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minss
> -  ;CHECK:       minss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp olt float %a0, %a1
>    %3 = select i1 %2, float %a0, float %a1
> @@ -841,8 +1693,14 @@ define float @stack_fold_minss(float %a0
>  }
>
>  define float @stack_fold_minss_commutable(float %a0, float %a1) #1 {
> -  ;CHECK-LABEL: stack_fold_minss_commutable
> -  ;CHECK:       minss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minss_commutable:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp olt float %a0, %a1
>    %3 = select i1 %2, float %a0, float %a1
> @@ -850,8 +1708,14 @@ define float @stack_fold_minss_commutabl
>  }
>
>  define <4 x float> @stack_fold_minss_int(<4 x float> %a0, <4 x float> %a1) #0 {
> -  ;CHECK-LABEL: stack_fold_minss_int
> -  ;CHECK:       minss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_minss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    minss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
>    ret <4 x float> %2
> @@ -859,8 +1723,15 @@ define <4 x float> @stack_fold_minss_int
>  declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_movddup(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_movddup
> -  ;CHECK:   movddup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movddup:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movddup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[0,0]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> undef, <2 x i32> <i32 0, i32 0>
>    ret <2 x double> %2
> @@ -872,48 +1743,86 @@ define <2 x double> @stack_fold_movddup(
>  ; TODO stack_fold_movlps (load / store)
>
>  define <4 x float> @stack_fold_movshdup(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_movshdup
> -  ;CHECK:       movshdup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movshdup:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[1,1,3,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
>    ret <4 x float> %2
>  }
>
>  define <4 x float> @stack_fold_movsldup(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_movsldup
> -  ;CHECK:       movsldup {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movsldup:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movsldup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[0,0,2,2]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
>    ret <4 x float> %2
>  }
>
>  define <2 x double> @stack_fold_mulpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulpd
> -  ;CHECK:       mulpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    mulpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x float> @stack_fold_mulps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulps
> -  ;CHECK:       mulps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    mulps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define double @stack_fold_mulsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_mulsd
> -  ;CHECK:       mulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    mulsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_mulsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulsd_int
> -  ;CHECK:       mulsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    mulsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -923,16 +1832,28 @@ define <2 x double> @stack_fold_mulsd_in
>  }
>
>  define float @stack_fold_mulss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_mulss
> -  ;CHECK:       mulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    mulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fmul float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_mulss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_mulss_int
> -  ;CHECK:       mulss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mulss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    mulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -942,8 +1863,16 @@ define <4 x float> @stack_fold_mulss_int
>  }
>
>  define <2 x double> @stack_fold_orpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_orpd
> -  ;CHECK:       orpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    orpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    xorpd %xmm1, %xmm1
> +; CHECK-NEXT:    addpd %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -955,8 +1884,16 @@ define <2 x double> @stack_fold_orpd(<2
>  }
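
(The trailing xorpd/addpd pair is not part of the operation under test. The IR here ORs the values as <2 x i64>, and then, per the "fadd forces execution domain" comment these files use, adds +0.0 so the bitwise op stays in the floating-point domain and is emitted as orpd rather than por. Roughly:

  define <2 x double> @force_fp_domain(<2 x double> %x, <2 x double> %y) {
    %i0 = bitcast <2 x double> %x to <2 x i64>
    %i1 = bitcast <2 x double> %y to <2 x i64>
    %or = or <2 x i64> %i0, %i1
    %f = bitcast <2 x i64> %or to <2 x double>
    ; the +0.0 add pins the value to the FP domain; it cannot be folded away
    ; because fadd +0.0 is not an identity for -0.0 inputs
    %r = fadd <2 x double> %f, zeroinitializer
    ret <2 x double> %r
  }

The same shape is visible in the shufpd test further down.)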
>
>  define <4 x float> @stack_fold_orps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_orps
> -  ;CHECK:       orps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_orps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    orps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    xorps %xmm1, %xmm1
> +; CHECK-NEXT:    addps %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <2 x i64>
>    %3 = bitcast <4 x float> %a1 to <2 x i64>
> @@ -970,8 +1907,14 @@ define <4 x float> @stack_fold_orps(<4 x
>  ; TODO stack_fold_rcpps
>
>  define <4 x float> @stack_fold_rcpps_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_rcpps_int
> -  ;CHECK:       rcpps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_rcpps_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    rcpps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
>    ret <4 x float> %2
> @@ -981,8 +1924,16 @@ declare <4 x float> @llvm.x86.sse.rcp.ps
>  ; TODO stack_fold_rcpss
>
>  define <4 x float> @stack_fold_rcpss_int(<4 x float> %a0, <4 x float> %a1) optsize {
> -  ;CHECK-LABEL: stack_fold_rcpss_int
> -  ;CHECK:       rcpss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_rcpss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    rcpss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a1)
>    %3 = extractelement <4 x float> %2, i32 0
> @@ -992,8 +1943,14 @@ define <4 x float> @stack_fold_rcpss_int
>  declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>)
>
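
(In the *_int variants both inputs get spilled. The scalar intrinsic only replaces the low lane, and the visible extractelement of lane 0 is presumably followed by an insertelement back into %a0 in the part of the IR the hunk cuts off, so the result genuinely depends on both values. That is why the generated code checks a plain 16-byte reload of %xmm0 followed by the folded rcpss of the other slot, rather than a single folded instruction.)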
>  define <2 x double> @stack_fold_roundpd(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_roundpd
> -  ;CHECK:       roundpd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    roundpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7)
>    ret <2 x double> %2
> @@ -1001,8 +1958,14 @@ define <2 x double> @stack_fold_roundpd(
>  declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readnone
>
>  define <4 x float> @stack_fold_roundps(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_roundps
> -  ;CHECK:       roundps $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    roundps $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7)
>    ret <4 x float> %2
> @@ -1010,8 +1973,15 @@ define <4 x float> @stack_fold_roundps(<
>  declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
>
>  define double @stack_fold_roundsd(double %a0) optsize {
> -  ;CHECK-LABEL: stack_fold_roundsd
> -  ;CHECK:       roundsd $9, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    roundsd $9, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call double @llvm.floor.f64(double %a0)
>    ret double %2
> @@ -1019,8 +1989,16 @@ define double @stack_fold_roundsd(double
>  declare double @llvm.floor.f64(double) nounwind readnone
>
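
(One regenerated detail worth calling out: the new "xorps %xmm0, %xmm0" ahead of the folded roundsd is not dead code. roundsd only writes the low lane and merges the rest of the destination, so reusing %xmm0 right after the asm block would pick up a false dependency on whatever the asm left in it; LLVM's partial-register-update handling inserts the zeroing idiom to break that dependency. The old hand-written CHECKs simply never matched this instruction. The same xorps pairing shows up below in stack_fold_roundss, stack_fold_sqrtsd and stack_fold_sqrtss.)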
>  define <2 x double> @stack_fold_roundsd_int(<2 x double> %a0, <2 x double> %a1) optsize {
> -  ;CHECK-LABEL: stack_fold_roundsd_int
> -  ;CHECK:       roundsd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    roundsd $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7)
>    ret <2 x double> %2
> @@ -1028,8 +2006,15 @@ define <2 x double> @stack_fold_roundsd_
>  declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone
>
>  define float @stack_fold_roundss(float %a0) minsize {
> -  ;CHECK-LABEL: stack_fold_roundss
> -  ;CHECK:       roundss $9, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    roundss $9, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call float @llvm.floor.f32(float %a0)
>    ret float %2
> @@ -1037,8 +2022,16 @@ define float @stack_fold_roundss(float %
>  declare float @llvm.floor.f32(float) nounwind readnone
>
>  define <4 x float> @stack_fold_roundss_int(<4 x float> %a0, <4 x float> %a1) optsize {
> -  ;CHECK-LABEL: stack_fold_roundss_int
> -  ;CHECK:       roundss $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_roundss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    roundss $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1, i32 7)
>    ret <4 x float> %2
> @@ -1048,8 +2041,14 @@ declare <4 x float> @llvm.x86.sse41.roun
>  ; TODO stack_fold_rsqrtps
>
>  define <4 x float> @stack_fold_rsqrtps_int(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_rsqrtps_int
> -  ;CHECK:       rsqrtps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_rsqrtps_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    rsqrtps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
>    ret <4 x float> %2
> @@ -1059,8 +2058,16 @@ declare <4 x float> @llvm.x86.sse.rsqrt.
>  ; TODO stack_fold_rsqrtss
>
>  define <4 x float> @stack_fold_rsqrtss_int(<4 x float> %a0, <4 x float> %a1) optsize {
> -  ;CHECK-LABEL: stack_fold_rsqrtss_int
> -  ;CHECK:       rsqrtss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_rsqrtss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    rsqrtss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a1)
>    %3 = extractelement <4 x float> %2, i32 0
> @@ -1070,8 +2077,17 @@ define <4 x float> @stack_fold_rsqrtss_i
>  declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>)
>
>  define <2 x double> @stack_fold_shufpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_shufpd
> -  ;CHECK:       shufpd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    shufpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[1],mem[0]
> +; CHECK-NEXT:    xorpd %xmm1, %xmm1
> +; CHECK-NEXT:    addpd %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
>    ; fadd forces execution domain
> @@ -1080,32 +2096,58 @@ define <2 x double> @stack_fold_shufpd(<
>  }
>
>  define <4 x float> @stack_fold_shufps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_shufps
> -  ;CHECK:       shufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_shufps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    shufps $200, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0,2],mem[0,3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
>    ret <4 x float> %2
>  }
>
>  define <2 x double> @stack_fold_sqrtpd(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_sqrtpd
> -  ;CHECK:       sqrtpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    sqrtpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a0)
>    ret <2 x double> %2
>  }
>
>  define <4 x float> @stack_fold_sqrtps(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_sqrtps
> -  ;CHECK:       sqrtps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    sqrtps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a0)
>    ret <4 x float> %2
>  }
>
>  define double @stack_fold_sqrtsd(double %a0) optsize {
> -  ;CHECK-LABEL: stack_fold_sqrtsd
> -  ;CHECK:       sqrtsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    sqrtsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call double @llvm.sqrt.f64(double %a0)
>    ret double %2
> @@ -1113,8 +2155,16 @@ define double @stack_fold_sqrtsd(double
>  declare double @llvm.sqrt.f64(double) nounwind readnone
>
>  define <2 x double> @stack_fold_sqrtsd_int(<2 x double> %a0, <2 x double> %a1) optsize {
> -  ;CHECK-LABEL: stack_fold_sqrtsd_int
> -  ;CHECK:       sqrtsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    sqrtsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <2 x double> %a1, i64 0
>    %3 = call double @llvm.sqrt.f64(double %2)
> @@ -1125,8 +2175,15 @@ define <2 x double> @stack_fold_sqrtsd_i
>  }
>
>  define float @stack_fold_sqrtss(float %a0) minsize {
> -  ;CHECK-LABEL: stack_fold_sqrtss
> -  ;CHECK:       sqrtss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps %xmm0, %xmm0
> +; CHECK-NEXT:    sqrtss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call float @llvm.sqrt.f32(float %a0)
>    ret float %2
> @@ -1134,8 +2191,16 @@ define float @stack_fold_sqrtss(float %a
>  declare float @llvm.sqrt.f32(float) nounwind readnone
>
>  define <4 x float> @stack_fold_sqrtss_int(<4 x float> %a0, <4 x float> %a1) optsize {
> -  ;CHECK-LABEL: stack_fold_sqrtss_int
> -  ;CHECK:       sqrtss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_sqrtss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    sqrtss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a1, i64 0
>    %3 = call float @llvm.sqrt.f32(float %2)
> @@ -1146,32 +2211,56 @@ define <4 x float> @stack_fold_sqrtss_in
>  }
>
>  define <2 x double> @stack_fold_subpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_subpd
> -  ;CHECK:       subpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    subpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub <2 x double> %a0, %a1
>    ret <2 x double> %2
>  }
>
>  define <4 x float> @stack_fold_subps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_subps
> -  ;CHECK:       subps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    subps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub <4 x float> %a0, %a1
>    ret <4 x float> %2
>  }
>
>  define double @stack_fold_subsd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_subsd
> -  ;CHECK:       subsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    subsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub double %a0, %a1
>    ret double %2
>  }
>
>  define <2 x double> @stack_fold_subsd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_subsd_int
> -  ;CHECK:       subsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subsd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    subsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <2 x double> %a0, i32 0
>    %3 = extractelement <2 x double> %a1, i32 0
> @@ -1181,16 +2270,28 @@ define <2 x double> @stack_fold_subsd_in
>  }
>
>  define float @stack_fold_subss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_subss
> -  ;CHECK:       subss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    subss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fsub float %a0, %a1
>    ret float %2
>  }
>
>  define <4 x float> @stack_fold_subss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_subss_int
> -  ;CHECK:       subss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_subss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    subss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = extractelement <4 x float> %a0, i32 0
>    %3 = extractelement <4 x float> %a1, i32 0
> @@ -1200,8 +2301,17 @@ define <4 x float> @stack_fold_subss_int
>  }
>
>  define i32 @stack_fold_ucomisd(double %a0, double %a1) {
> -  ;CHECK-LABEL: stack_fold_ucomisd
> -  ;CHECK:       ucomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ucomisd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    ucomisd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    sete %al
> +; CHECK-NEXT:    leal -1(%rax,%rax), %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ueq double %a0, %a1
>    %3 = select i1 %2, i32 1, i32 -1
> @@ -1209,8 +2319,18 @@ define i32 @stack_fold_ucomisd(double %a
>  }
>
>  define i32 @stack_fold_ucomisd_int(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_ucomisd_int
> -  ;CHECK:       ucomisd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ucomisd_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    ucomisd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setnp %al
> +; CHECK-NEXT:    sete %cl
> +; CHECK-NEXT:    andb %al, %cl
> +; CHECK-NEXT:    movzbl %cl, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
>    ret i32 %2
> @@ -1218,8 +2338,17 @@ define i32 @stack_fold_ucomisd_int(<2 x
>  declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readnone
>
>  define i32 @stack_fold_ucomiss(float %a0, float %a1) {
> -  ;CHECK-LABEL: stack_fold_ucomiss
> -  ;CHECK:       ucomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ucomiss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    ucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    sete %al
> +; CHECK-NEXT:    leal -1(%rax,%rax), %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = fcmp ueq float %a0, %a1
>    %3 = select i1 %2, i32 1, i32 -1
> @@ -1227,8 +2356,18 @@ define i32 @stack_fold_ucomiss(float %a0
>  }
>
>  define i32 @stack_fold_ucomiss_int(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_ucomiss_int
> -  ;CHECK:       ucomiss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ucomiss_int:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    ucomiss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setnp %al
> +; CHECK-NEXT:    sete %cl
> +; CHECK-NEXT:    andb %al, %cl
> +; CHECK-NEXT:    movzbl %cl, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1)
>    ret i32 %2
> @@ -1236,8 +2375,17 @@ define i32 @stack_fold_ucomiss_int(<4 x
>  declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
>
>  define <2 x double> @stack_fold_unpckhpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpckhpd
> -  ;CHECK:       unpckhpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpckhpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    unpckhpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[1],mem[1]
> +; CHECK-NEXT:    xorpd %xmm1, %xmm1
> +; CHECK-NEXT:    addpd %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
>    ; fadd forces execution domain
> @@ -1246,8 +2394,17 @@ define <2 x double> @stack_fold_unpckhpd
>  }
>
>  define <4 x float> @stack_fold_unpckhps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpckhps
> -  ;CHECK:       unpckhps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpckhps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
> +; CHECK-NEXT:    xorps %xmm1, %xmm1
> +; CHECK-NEXT:    addps %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
>    ; fadd forces execution domain
> @@ -1256,8 +2413,17 @@ define <4 x float> @stack_fold_unpckhps(
>  }
>
>  define <2 x double> @stack_fold_unpcklpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpcklpd
> -  ;CHECK:       unpcklpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpcklpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
> +; CHECK-NEXT:    xorpd %xmm1, %xmm1
> +; CHECK-NEXT:    addpd %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
>    ; fadd forces execution domain
> @@ -1266,8 +2432,17 @@ define <2 x double> @stack_fold_unpcklpd
>  }
>
>  define <4 x float> @stack_fold_unpcklps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_unpcklps
> -  ;CHECK:       unpcklps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_unpcklps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
> +; CHECK-NEXT:    xorps %xmm1, %xmm1
> +; CHECK-NEXT:    addps %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
>    ; fadd forces execution domain
> @@ -1276,8 +2451,16 @@ define <4 x float> @stack_fold_unpcklps(
>  }
>
>  define <2 x double> @stack_fold_xorpd(<2 x double> %a0, <2 x double> %a1) {
> -  ;CHECK-LABEL: stack_fold_xorpd
> -  ;CHECK:       xorpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    xorpd %xmm1, %xmm1
> +; CHECK-NEXT:    addpd %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <2 x double> %a0 to <2 x i64>
>    %3 = bitcast <2 x double> %a1 to <2 x i64>
> @@ -1289,8 +2472,16 @@ define <2 x double> @stack_fold_xorpd(<2
>  }
>
>  define <4 x float> @stack_fold_xorps(<4 x float> %a0, <4 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_xorps
> -  ;CHECK:       xorps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_xorps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    xorps %xmm1, %xmm1
> +; CHECK-NEXT:    addps %xmm1, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x float> %a0 to <2 x i64>
>    %3 = bitcast <4 x float> %a1 to <2 x i64>
>
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-int-avx1.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-int-avx1.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-int-avx1.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-int-avx1.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx,+aes,+pclmul < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -9,8 +10,14 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
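Side note for anyone skimming the diff: every test in these files follows the same shape, so one minimal sketch covers them all. The inline asm "nop" clobbers all the XMM registers the test doesn't need, which forces the second operand to be spilled across it; the regenerated CHECK lines then assert that the reload is folded straight into the instruction as a memory operand. A distilled example of the pattern (the function name is illustrative, not from the commit):

define <16 x i8> @stack_fold_example(<16 x i8> %a0, <16 x i8> %a1) {
  ; clobber everything except the argument registers, forcing a spill of %a1
  %asm = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  ; expect vpaddb to fold the reload of %a1 from its stack slot
  %sum = add <16 x i8> %a0, %a1
  ret <16 x i8> %sum
}

Fed through the RUN line above and utils/update_llc_test_checks.py, this yields exactly the spill / #APP nop #NO_APP / folded-reload sequence seen throughout the hunks below (e.g. stack_fold_paddb).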
>  define <2 x i64> @stack_fold_aesdec(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_aesdec
> -  ;CHECK:       vaesdec {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_aesdec:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaesdec {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64> %a0, <2 x i64> %a1)
>    ret <2 x i64> %2
> @@ -18,8 +25,14 @@ define <2 x i64> @stack_fold_aesdec(<2 x
>  declare <2 x i64> @llvm.x86.aesni.aesdec(<2 x i64>, <2 x i64>) nounwind readnone
>
>  define <2 x i64> @stack_fold_aesdeclast(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_aesdeclast
> -  ;CHECK:       vaesdeclast {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_aesdeclast:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaesdeclast {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64> %a0, <2 x i64> %a1)
>    ret <2 x i64> %2
> @@ -27,8 +40,14 @@ define <2 x i64> @stack_fold_aesdeclast(
>  declare <2 x i64> @llvm.x86.aesni.aesdeclast(<2 x i64>, <2 x i64>) nounwind readnone
>
>  define <2 x i64> @stack_fold_aesenc(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_aesenc
> -  ;CHECK:       vaesenc {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_aesenc:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaesenc {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64> %a0, <2 x i64> %a1)
>    ret <2 x i64> %2
> @@ -36,8 +55,14 @@ define <2 x i64> @stack_fold_aesenc(<2 x
>  declare <2 x i64> @llvm.x86.aesni.aesenc(<2 x i64>, <2 x i64>) nounwind readnone
>
>  define <2 x i64> @stack_fold_aesenclast(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_aesenclast
> -  ;CHECK:       vaesenclast {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_aesenclast:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaesenclast {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64> %a0, <2 x i64> %a1)
>    ret <2 x i64> %2
> @@ -45,8 +70,14 @@ define <2 x i64> @stack_fold_aesenclast(
>  declare <2 x i64> @llvm.x86.aesni.aesenclast(<2 x i64>, <2 x i64>) nounwind readnone
>
>  define <2 x i64> @stack_fold_aesimc(<2 x i64> %a0) {
> -  ;CHECK-LABEL: stack_fold_aesimc
> -  ;CHECK:       vaesimc {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_aesimc:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaesimc {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64> %a0)
>    ret <2 x i64> %2
> @@ -54,8 +85,14 @@ define <2 x i64> @stack_fold_aesimc(<2 x
>  declare <2 x i64> @llvm.x86.aesni.aesimc(<2 x i64>) nounwind readnone
>
>  define <2 x i64> @stack_fold_aeskeygenassist(<2 x i64> %a0) {
> -  ;CHECK-LABEL: stack_fold_aeskeygenassist
> -  ;CHECK:       vaeskeygenassist $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_aeskeygenassist:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vaeskeygenassist $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %a0, i8 7)
>    ret <2 x i64> %2
> @@ -63,8 +100,47 @@ define <2 x i64> @stack_fold_aeskeygenas
>  declare <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64>, i8) nounwind readnone
>
>  define <4 x i32> @stack_fold_movd_load(i32 %a0) {
> -  ;CHECK-LABEL: stack_fold_movd_load
> -  ;CHECK:       movd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movd_load:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[0],zero,zero,zero
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = insertelement <4 x i32> zeroinitializer, i32 %a0, i32 0
>    ; add forces execution domain
> @@ -73,8 +149,45 @@ define <4 x i32> @stack_fold_movd_load(i
>  }
>
>  define i32 @stack_fold_movd_store(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_movd_store
> -  ;CHECK:       movd {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
> +; CHECK-LABEL: stack_fold_movd_store:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    ; add forces execution domain
>    %1 = add <4 x i32> %a0, %a1
>    %2 = extractelement <4 x i32> %1, i32 0
> @@ -83,8 +196,17 @@ define i32 @stack_fold_movd_store(<4 x i
>  }
>
>  define <2 x i64> @stack_fold_movq_load(<2 x i64> %a0) {
> -  ;CHECK-LABEL: stack_fold_movq_load
> -  ;CHECK:       movq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_movq_load:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[0],zero
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
>    ; add forces execution domain
> @@ -93,8 +215,45 @@ define <2 x i64> @stack_fold_movq_load(<
>  }
>
>  define i64 @stack_fold_movq_store(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_movq_store
> -  ;CHECK:       movq {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 8-byte Folded Spill
> +; CHECK-LABEL: stack_fold_movq_store:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    vmovq %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    ; add forces execution domain
>    %1 = add <2 x i64> %a0, %a1
>    %2 = extractelement <2 x i64> %1, i32 0
> @@ -103,8 +262,14 @@ define i64 @stack_fold_movq_store(<2 x i
>  }
>
>  define <8 x i16> @stack_fold_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_mpsadbw
> -  ;CHECK:       vmpsadbw $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mpsadbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmpsadbw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7)
>    ret <8 x i16> %2
> @@ -112,8 +277,14 @@ define <8 x i16> @stack_fold_mpsadbw(<16
>  declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind readnone
>
>  define <16 x i8> @stack_fold_pabsb(<16 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pabsb
> -  ;CHECK:       vpabsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pabsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpabsb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <16 x i8> %a0, zeroinitializer
>    %3 = sub <16 x i8> zeroinitializer, %a0
> @@ -122,8 +293,14 @@ define <16 x i8> @stack_fold_pabsb(<16 x
>  }
>
>  define <4 x i32> @stack_fold_pabsd(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_pabsd
> -  ;CHECK:       vpabsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pabsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <4 x i32> %a0, zeroinitializer
>    %3 = sub <4 x i32> zeroinitializer, %a0
> @@ -132,8 +309,14 @@ define <4 x i32> @stack_fold_pabsd(<4 x
>  }
>
>  define <8 x i16> @stack_fold_pabsw(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pabsw
> -  ;CHECK:       vpabsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pabsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpabsw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <8 x i16> %a0, zeroinitializer
>    %3 = sub <8 x i16> zeroinitializer, %a0
> @@ -142,8 +325,14 @@ define <8 x i16> @stack_fold_pabsw(<8 x
>  }
>
>  define <8 x i16> @stack_fold_packssdw(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_packssdw
> -  ;CHECK:       vpackssdw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_packssdw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpackssdw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
>    ret <8 x i16> %2
> @@ -151,8 +340,14 @@ define <8 x i16> @stack_fold_packssdw(<4
>  declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <16 x i8> @stack_fold_packsswb(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_packsswb
> -  ;CHECK:       vpacksswb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_packsswb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpacksswb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
>    ret <16 x i8> %2
> @@ -160,8 +355,14 @@ define <16 x i8> @stack_fold_packsswb(<8
>  declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i16> @stack_fold_packusdw(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_packusdw
> -  ;CHECK:       vpackusdw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_packusdw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpackusdw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
>    ret <8 x i16> %2
> @@ -169,8 +370,14 @@ define <8 x i16> @stack_fold_packusdw(<4
>  declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <16 x i8> @stack_fold_packuswb(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_packuswb
> -  ;CHECK:       vpackuswb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_packuswb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpackuswb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
>    ret <16 x i8> %2
> @@ -178,32 +385,56 @@ define <16 x i8> @stack_fold_packuswb(<8
>  declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <16 x i8> @stack_fold_paddb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddb
> -  ;CHECK:       vpaddb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = add <16 x i8> %a0, %a1
>    ret <16 x i8> %2
>  }
>
>  define <4 x i32> @stack_fold_paddd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddd
> -  ;CHECK:       vpaddd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = add <4 x i32> %a0, %a1
>    ret <4 x i32> %2
>  }
>
>  define <2 x i64> @stack_fold_paddq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddq
> -  ;CHECK:       vpaddq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = add <2 x i64> %a0, %a1
>    ret <2 x i64> %2
>  }
>
>  define <16 x i8> @stack_fold_paddsb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddsb
> -  ;CHECK:       vpaddsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddsb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
>    ret <16 x i8> %2
> @@ -211,8 +442,14 @@ define <16 x i8> @stack_fold_paddsb(<16
>  declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
>
>  define <8 x i16> @stack_fold_paddsw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddsw
> -  ;CHECK:       vpaddsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddsw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -220,8 +457,14 @@ define <8 x i16> @stack_fold_paddsw(<8 x
>  declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <16 x i8> @stack_fold_paddusb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddusb
> -  ;CHECK:       vpaddusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddusb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddusb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
>    ret <16 x i8> %2
> @@ -229,8 +472,14 @@ define <16 x i8> @stack_fold_paddusb(<16
>  declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
>
>  define <8 x i16> @stack_fold_paddusw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddusw
> -  ;CHECK:       vpaddusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddusw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddusw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -238,24 +487,45 @@ define <8 x i16> @stack_fold_paddusw(<8
>  declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i16> @stack_fold_paddw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddw
> -  ;CHECK:       vpaddw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = add <8 x i16> %a0, %a1
>    ret <8 x i16> %2
>  }
>
>  define <16 x i8> @stack_fold_palignr(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_palignr
> -  ;CHECK:       vpalignr $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_palignr:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpalignr $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i8> %a1, <16 x i8> %a0, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
>    ret <16 x i8> %2
>  }
>
>  define <16 x i8> @stack_fold_pand(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pand
> -  ;CHECK:       vpand {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pand:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpand {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = and <16 x i8> %a0, %a1
>    ; add forces execution domain
> @@ -264,8 +534,16 @@ define <16 x i8> @stack_fold_pand(<16 x
>  }
>
>  define <16 x i8> @stack_fold_pandn(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pandn
> -  ;CHECK:       vpandn {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pandn:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpandn {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = xor <16 x i8> %a0, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
>    %3 = and <16 x i8> %2, %a1
> @@ -275,8 +553,14 @@ define <16 x i8> @stack_fold_pandn(<16 x
>  }
>
>  define <16 x i8> @stack_fold_pavgb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pavgb
> -  ;CHECK:       vpavgb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pavgb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpavgb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = zext <16 x i8> %a0 to <16 x i16>
>    %3 = zext <16 x i8> %a1 to <16 x i16>
> @@ -288,8 +572,14 @@ define <16 x i8> @stack_fold_pavgb(<16 x
>  }
>
>  define <8 x i16> @stack_fold_pavgw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pavgw
> -  ;CHECK:       vpavgw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pavgw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpavgw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = zext <8 x i16> %a0 to <8 x i32>
>    %3 = zext <8 x i16> %a1 to <8 x i32>
> @@ -301,8 +591,14 @@ define <8 x i16> @stack_fold_pavgw(<8 x
>  }
>
>  define <16 x i8> @stack_fold_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %c) {
> -  ;CHECK-LABEL: stack_fold_pblendvb
> -  ;CHECK:       vpblendvb {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pblendvb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpblendvb %xmm0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a1, <16 x i8> %c, <16 x i8> %a0)
>    ret <16 x i8> %2
> @@ -310,16 +606,29 @@ define <16 x i8> @stack_fold_pblendvb(<1
>  declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
>
>  define <8 x i16> @stack_fold_pblendw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pblendw
> -  ;CHECK:       vpblendw $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pblendw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpblendw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[0,1,2],xmm0[3,4,5,6,7]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
>    ret <8 x i16> %2
>  }
>
>  define <2 x i64> @stack_fold_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_pclmulqdq
> -  ;CHECK:       vpclmulqdq $0, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pclmulqdq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpclmulqdq $0, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0)
>    ret <2 x i64> %2
> @@ -327,8 +636,14 @@ define <2 x i64> @stack_fold_pclmulqdq(<
>  declare <2 x i64> @llvm.x86.pclmulqdq(<2 x i64>, <2 x i64>, i8) nounwind readnone
>
>  define <16 x i8> @stack_fold_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpeqb
> -  ;CHECK:       vpcmpeqb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpeqb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpeqb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp eq <16 x i8> %a0, %a1
>    %3 = sext <16 x i1> %2 to <16 x i8>
> @@ -336,8 +651,14 @@ define <16 x i8> @stack_fold_pcmpeqb(<16
>  }
>
>  define <4 x i32> @stack_fold_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpeqd
> -  ;CHECK:       vpcmpeqd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpeqd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpeqd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp eq <4 x i32> %a0, %a1
>    %3 = sext <4 x i1> %2 to <4 x i32>
> @@ -345,8 +666,14 @@ define <4 x i32> @stack_fold_pcmpeqd(<4
>  }
>
>  define <2 x i64> @stack_fold_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpeqq
> -  ;CHECK:       vpcmpeqq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpeqq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpeqq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp eq <2 x i64> %a0, %a1
>    %3 = sext <2 x i1> %2 to <2 x i64>
> @@ -354,8 +681,14 @@ define <2 x i64> @stack_fold_pcmpeqq(<2
>  }
>
>  define <8 x i16> @stack_fold_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpeqw
> -  ;CHECK:       vpcmpeqw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpeqw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpeqw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp eq <8 x i16> %a0, %a1
>    %3 = sext <8 x i1> %2 to <8 x i16>
> @@ -363,8 +696,17 @@ define <8 x i16> @stack_fold_pcmpeqw(<8
>  }
>
>  define i32 @stack_fold_pcmpestri(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpestri
> -  ;CHECK:       vpcmpestri $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpestri:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl $7, %eax
> +; CHECK-NEXT:    movl $7, %edx
> +; CHECK-NEXT:    vpcmpestri $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    movl %ecx, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{rax},~{flags}"()
>    %2 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
>    ret i32 %2
> @@ -372,8 +714,16 @@ define i32 @stack_fold_pcmpestri(<16 x i
>  declare i32 @llvm.x86.sse42.pcmpestri128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
>
>  define <16 x i8> @stack_fold_pcmpestrm(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpestrm
> -  ;CHECK:       vpcmpestrm $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpestrm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl $7, %eax
> +; CHECK-NEXT:    movl $7, %edx
> +; CHECK-NEXT:    vpcmpestrm $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{rax},~{flags}"()
>    %2 = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
>    ret <16 x i8> %2
> @@ -381,8 +731,14 @@ define <16 x i8> @stack_fold_pcmpestrm(<
>  declare <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8>, i32, <16 x i8>, i32, i8) nounwind readnone
>
>  define <16 x i8> @stack_fold_pcmpgtb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpgtb
> -  ;CHECK:       vpcmpgtb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpgtb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpgtb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <16 x i8> %a0, %a1
>    %3 = sext <16 x i1> %2 to <16 x i8>
> @@ -390,8 +746,14 @@ define <16 x i8> @stack_fold_pcmpgtb(<16
>  }
>
>  define <4 x i32> @stack_fold_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpgtd
> -  ;CHECK:       vpcmpgtd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpgtd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpgtd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <4 x i32> %a0, %a1
>    %3 = sext <4 x i1> %2 to <4 x i32>
> @@ -399,8 +761,14 @@ define <4 x i32> @stack_fold_pcmpgtd(<4
>  }
>
>  define <2 x i64> @stack_fold_pcmpgtq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpgtq
> -  ;CHECK:       vpcmpgtq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpgtq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpgtq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <2 x i64> %a0, %a1
>    %3 = sext <2 x i1> %2 to <2 x i64>
> @@ -408,8 +776,14 @@ define <2 x i64> @stack_fold_pcmpgtq(<2
>  }
>
>  define <8 x i16> @stack_fold_pcmpgtw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpgtw
> -  ;CHECK:       vpcmpgtw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpgtw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpgtw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <8 x i16> %a0, %a1
>    %3 = sext <8 x i1> %2 to <8 x i16>
> @@ -417,8 +791,15 @@ define <8 x i16> @stack_fold_pcmpgtw(<8
>  }
>
>  define i32 @stack_fold_pcmpistri(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpistri
> -  ;CHECK:       vpcmpistri $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpistri:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpistri $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    movl %ecx, %eax
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
>    ret i32 %2
> @@ -426,8 +807,14 @@ define i32 @stack_fold_pcmpistri(<16 x i
>  declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind readnone
>
>  define <16 x i8> @stack_fold_pcmpistrm(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpistrm
> -  ;CHECK:       vpcmpistrm $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpistrm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpistrm $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
>    ret <16 x i8> %2
> @@ -437,9 +824,45 @@ declare <16 x i8> @llvm.x86.sse42.pcmpis
>  ; TODO stack_fold_pextrb
>
>  define i32 @stack_fold_pextrd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pextrd
> -  ;CHECK:       vpextrd $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 4-byte Folded Spill
> -  ;CHECK:       movl    {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Reload
> +; CHECK-LABEL: stack_fold_pextrd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    vpextrd $1, %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    ; add forces execution domain
>    %1 = add <4 x i32> %a0, %a1
>    %2 = extractelement <4 x i32> %1, i32 1
> @@ -448,9 +871,44 @@ define i32 @stack_fold_pextrd(<4 x i32>
>  }
>
>  define i64 @stack_fold_pextrq(<2 x i64> %a0) {
> -  ;CHECK-LABEL: stack_fold_pextrq
> -  ;CHECK:       vpextrq $1, {{%xmm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 8-byte Folded Spill
> -  ;CHECK:       movq    {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Reload
> +; CHECK-LABEL: stack_fold_pextrq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    vpextrq $1, %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = extractelement <2 x i64> %a0, i32 1
>    %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    ret i64 %1
> @@ -459,8 +917,14 @@ define i64 @stack_fold_pextrq(<2 x i64>
>  ; TODO stack_fold_pextrw
>
>  define <4 x i32> @stack_fold_phaddd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_phaddd
> -  ;CHECK:       vphaddd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phaddd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
>    ret <4 x i32> %2
> @@ -468,8 +932,14 @@ define <4 x i32> @stack_fold_phaddd(<4 x
>  declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <8 x i16> @stack_fold_phaddsw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_phaddsw
> -  ;CHECK:       vphaddsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phaddsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphaddsw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -477,8 +947,14 @@ define <8 x i16> @stack_fold_phaddsw(<8
>  declare <8 x i16> @llvm.x86.ssse3.phadd.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i16> @stack_fold_phaddw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_phaddw
> -  ;CHECK:       vphaddw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phaddw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphaddw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -486,8 +962,14 @@ define <8 x i16> @stack_fold_phaddw(<8 x
>  declare <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i16> @stack_fold_phminposuw(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_phminposuw
> -  ;CHECK:       vphminposuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phminposuw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphminposuw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %a0)
>    ret <8 x i16> %2
> @@ -495,8 +977,14 @@ define <8 x i16> @stack_fold_phminposuw(
>  declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
>
>  define <4 x i32> @stack_fold_phsubd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_phsubd
> -  ;CHECK:       vphsubd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phsubd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphsubd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
>    ret <4 x i32> %2
> @@ -504,8 +992,14 @@ define <4 x i32> @stack_fold_phsubd(<4 x
>  declare <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <8 x i16> @stack_fold_phsubsw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_phsubsw
> -  ;CHECK:       vphsubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phsubsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphsubsw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -513,8 +1007,14 @@ define <8 x i16> @stack_fold_phsubsw(<8
>  declare <8 x i16> @llvm.x86.ssse3.phsub.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i16> @stack_fold_phsubw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_phsubw
> -  ;CHECK:       vphsubw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phsubw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphsubw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -522,40 +1022,190 @@ define <8 x i16> @stack_fold_phsubw(<8 x
>  declare <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <16 x i8> @stack_fold_pinsrb(<16 x i8> %a0, i8 %a1) {
> -  ;CHECK-LABEL: stack_fold_pinsrb
> -  ;CHECK:       vpinsrb $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pinsrb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpinsrb $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = insertelement <16 x i8> %a0, i8 %a1, i32 1
>    ret <16 x i8> %2
>  }
>
>  define <4 x i32> @stack_fold_pinsrd(<4 x i32> %a0, i32 %a1) {
> -  ;CHECK-LABEL: stack_fold_pinsrd
> -  ;CHECK:       vpinsrd $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pinsrd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpinsrd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = insertelement <4 x i32> %a0, i32 %a1, i32 1
>    ret <4 x i32> %2
>  }
>
>  define <2 x i64> @stack_fold_pinsrq(<2 x i64> %a0, i64 %a1) {
> -  ;CHECK-LABEL: stack_fold_pinsrq
> -  ;CHECK:       vpinsrq $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pinsrq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpinsrq $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = insertelement <2 x i64> %a0, i64 %a1, i32 1
>    ret <2 x i64> %2
>  }
>
>  define <8 x i16> @stack_fold_pinsrw(<8 x i16> %a0, i16 %a1) {
> -  ;CHECK-LABEL: stack_fold_pinsrw
> -  ;CHECK:       vpinsrw $1, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 4-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pinsrw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    pushq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    pushq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    pushq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    pushq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    pushq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    pushq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 56
> +; CHECK-NEXT:    .cfi_offset %rbx, -56
> +; CHECK-NEXT:    .cfi_offset %r12, -48
> +; CHECK-NEXT:    .cfi_offset %r13, -40
> +; CHECK-NEXT:    .cfi_offset %r14, -32
> +; CHECK-NEXT:    .cfi_offset %r15, -24
> +; CHECK-NEXT:    .cfi_offset %rbp, -16
> +; CHECK-NEXT:    movl %edi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpinsrw $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
> +; CHECK-NEXT:    popq %rbx
> +; CHECK-NEXT:    .cfi_def_cfa_offset 48
> +; CHECK-NEXT:    popq %r12
> +; CHECK-NEXT:    .cfi_def_cfa_offset 40
> +; CHECK-NEXT:    popq %r13
> +; CHECK-NEXT:    .cfi_def_cfa_offset 32
> +; CHECK-NEXT:    popq %r14
> +; CHECK-NEXT:    .cfi_def_cfa_offset 24
> +; CHECK-NEXT:    popq %r15
> +; CHECK-NEXT:    .cfi_def_cfa_offset 16
> +; CHECK-NEXT:    popq %rbp
> +; CHECK-NEXT:    .cfi_def_cfa_offset 8
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
>    %2 = insertelement <8 x i16> %a0, i16 %a1, i32 1
>    ret <8 x i16> %2
>  }
>
>  define <8 x i16> @stack_fold_pmaddubsw(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaddubsw
> -  ;CHECK:       vpmaddubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaddubsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaddubsw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> %a1)
>    ret <8 x i16> %2
> @@ -563,8 +1213,14 @@ define <8 x i16> @stack_fold_pmaddubsw(<
>  declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
>
>  define <4 x i32> @stack_fold_pmaddwd(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaddwd
> -  ;CHECK:       vpmaddwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaddwd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaddwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1)
>    ret <4 x i32> %2
> @@ -572,8 +1228,14 @@ define <4 x i32> @stack_fold_pmaddwd(<8
>  declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <16 x i8> @stack_fold_pmaxsb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxsb
> -  ;CHECK:       vpmaxsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxsb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <16 x i8> %a0, %a1
>    %3 = select <16 x i1> %2, <16 x i8> %a0, <16 x i8> %a1
> @@ -581,8 +1243,14 @@ define <16 x i8> @stack_fold_pmaxsb(<16
>  }
>
>  define <4 x i32> @stack_fold_pmaxsd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxsd
> -  ;CHECK:       vpmaxsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <4 x i32> %a0, %a1
>    %3 = select <4 x i1> %2, <4 x i32> %a0, <4 x i32> %a1
> @@ -590,8 +1258,14 @@ define <4 x i32> @stack_fold_pmaxsd(<4 x
>  }
>
>  define <8 x i16> @stack_fold_pmaxsw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxsw
> -  ;CHECK:       vpmaxsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxsw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <8 x i16> %a0, %a1
>    %3 = select <8 x i1> %2, <8 x i16> %a0, <8 x i16> %a1
> @@ -599,8 +1273,14 @@ define <8 x i16> @stack_fold_pmaxsw(<8 x
>  }
>
>  define <16 x i8> @stack_fold_pmaxub(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxub
> -  ;CHECK:       vpmaxub {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxub:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxub {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ugt <16 x i8> %a0, %a1
>    %3 = select <16 x i1> %2, <16 x i8> %a0, <16 x i8> %a1
> @@ -608,8 +1288,14 @@ define <16 x i8> @stack_fold_pmaxub(<16
>  }
>
>  define <4 x i32> @stack_fold_pmaxud(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxud
> -  ;CHECK:       vpmaxud {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxud:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxud {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ugt <4 x i32> %a0, %a1
>    %3 = select <4 x i1> %2, <4 x i32> %a0, <4 x i32> %a1
> @@ -617,8 +1303,14 @@ define <4 x i32> @stack_fold_pmaxud(<4 x
>  }
>
>  define <8 x i16> @stack_fold_pmaxuw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxuw
> -  ;CHECK:       vpmaxuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxuw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxuw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ugt <8 x i16> %a0, %a1
>    %3 = select <8 x i1> %2, <8 x i16> %a0, <8 x i16> %a1
> @@ -626,8 +1318,14 @@ define <8 x i16> @stack_fold_pmaxuw(<8 x
>  }
>
>  define <16 x i8> @stack_fold_pminsb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminsb
> -  ;CHECK:       vpminsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminsb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp slt <16 x i8> %a0, %a1
>    %3 = select <16 x i1> %2, <16 x i8> %a0, <16 x i8> %a1
> @@ -635,8 +1333,14 @@ define <16 x i8> @stack_fold_pminsb(<16
>  }
>
>  define <4 x i32> @stack_fold_pminsd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminsd
> -  ;CHECK:       vpminsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp slt <4 x i32> %a0, %a1
>    %3 = select <4 x i1> %2, <4 x i32> %a0, <4 x i32> %a1
> @@ -644,8 +1348,14 @@ define <4 x i32> @stack_fold_pminsd(<4 x
>  }
>
>  define <8 x i16> @stack_fold_pminsw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminsw
> -  ;CHECK:       vpminsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminsw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp slt <8 x i16> %a0, %a1
>    %3 = select <8 x i1> %2, <8 x i16> %a0, <8 x i16> %a1
> @@ -653,8 +1363,14 @@ define <8 x i16> @stack_fold_pminsw(<8 x
>  }
>
>  define <16 x i8> @stack_fold_pminub(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminub
> -  ;CHECK:       vpminub {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminub:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminub {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ult <16 x i8> %a0, %a1
>    %3 = select <16 x i1> %2, <16 x i8> %a0, <16 x i8> %a1
> @@ -662,8 +1378,14 @@ define <16 x i8> @stack_fold_pminub(<16
>  }
>
>  define <4 x i32> @stack_fold_pminud(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminud
> -  ;CHECK:       vpminud {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminud:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminud {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ult <4 x i32> %a0, %a1
>    %3 = select <4 x i1> %2, <4 x i32> %a0, <4 x i32> %a1
> @@ -671,8 +1393,14 @@ define <4 x i32> @stack_fold_pminud(<4 x
>  }
>
>  define <8 x i16> @stack_fold_pminuw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminuw
> -  ;CHECK:       vpminuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminuw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminuw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ult <8 x i16> %a0, %a1
>    %3 = select <8 x i1> %2, <8 x i16> %a0, <8 x i16> %a1
> @@ -680,8 +1408,14 @@ define <8 x i16> @stack_fold_pminuw(<8 x
>  }
>
>  define <2 x i64> @stack_fold_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmuldq
> -  ;CHECK:       vpmuldq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmuldq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmuldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x i32> %a0 to <2 x i64>
>    %3 = bitcast <4 x i32> %a1 to <2 x i64>
> @@ -694,8 +1428,14 @@ define <2 x i64> @stack_fold_pmuldq(<4 x
>  }
>
>  define <8 x i16> @stack_fold_pmulhrsw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmulhrsw
> -  ;CHECK:       vpmulhrsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmulhrsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmulhrsw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -703,8 +1443,14 @@ define <8 x i16> @stack_fold_pmulhrsw(<8
>  declare <8 x i16> @llvm.x86.ssse3.pmul.hr.sw.128(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i16> @stack_fold_pmulhuw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmulhuw
> -  ;CHECK:       vpmulhuw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmulhuw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmulhuw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -712,8 +1458,14 @@ define <8 x i16> @stack_fold_pmulhuw(<8
>  declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i16> @stack_fold_pmulhw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmulhw
> -  ;CHECK:       vpmulhw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmulhw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmulhw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -721,24 +1473,42 @@ define <8 x i16> @stack_fold_pmulhw(<8 x
>  declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <4 x i32> @stack_fold_pmulld(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmulld
> -  ;CHECK:       vpmulld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmulld:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmulld {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = mul <4 x i32> %a0, %a1
>    ret <4 x i32> %2
>  }
>
>  define <8 x i16> @stack_fold_pmullw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmullw
> -  ;CHECK:       vpmullw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmullw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmullw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = mul <8 x i16> %a0, %a1
>    ret <8 x i16> %2
>  }
>
>  define <2 x i64> @stack_fold_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmuludq
> -  ;CHECK:       vpmuludq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmuludq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmuludq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <4 x i32> %a0 to <2 x i64>
>    %3 = bitcast <4 x i32> %a1 to <2 x i64>
> @@ -749,8 +1519,16 @@ define <2 x i64> @stack_fold_pmuludq(<4
>  }
>
>  define <16 x i8> @stack_fold_por(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_por
> -  ;CHECK:       vpor {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_por:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpor {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = or <16 x i8> %a0, %a1
>    ; add forces execution domain
> @@ -759,8 +1537,14 @@ define <16 x i8> @stack_fold_por(<16 x i
>  }
>
>  define <2 x i64> @stack_fold_psadbw(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psadbw
> -  ;CHECK:       vpsadbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psadbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsadbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1)
>    ret <2 x i64> %2
> @@ -768,8 +1552,14 @@ define <2 x i64> @stack_fold_psadbw(<16
>  declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
>
>  define <16 x i8> @stack_fold_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pshufb
> -  ;CHECK:       vpshufb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pshufb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpshufb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> %a1)
>    ret <16 x i8> %2
> @@ -777,8 +1567,17 @@ define <16 x i8> @stack_fold_pshufb(<16
>  declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind readnone
>
>  define <4 x i32> @stack_fold_pshufd(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_pshufd
> -  ;CHECK:       vpshufd $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pshufd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpshufd $27, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[3,2,1,0]
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
>    ; add forces execution domain
> @@ -787,24 +1586,44 @@ define <4 x i32> @stack_fold_pshufd(<4 x
>  }
>
>  define <8 x i16> @stack_fold_pshufhw(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pshufhw
> -  ;CHECK:       vpshufhw $11, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pshufhw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpshufhw $11, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[0,1,2,3,7,6,4,4]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 4, i32 4>
>    ret <8 x i16> %2
>  }
>
>  define <8 x i16> @stack_fold_pshuflw(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pshuflw
> -  ;CHECK:       vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pshuflw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpshuflw $27, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[3,2,1,0,4,5,6,7]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
>    ret <8 x i16> %2
>  }
>
>  define <16 x i8> @stack_fold_psignb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psignb
> -  ;CHECK:       vpsignb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psignb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsignb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8> %a0, <16 x i8> %a1)
>    ret <16 x i8> %2
> @@ -812,8 +1631,14 @@ define <16 x i8> @stack_fold_psignb(<16
>  declare <16 x i8> @llvm.x86.ssse3.psign.b.128(<16 x i8>, <16 x i8>) nounwind readnone
>
>  define <4 x i32> @stack_fold_psignd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psignd
> -  ;CHECK:       vpsignd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psignd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsignd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32> %a0, <4 x i32> %a1)
>    ret <4 x i32> %2
> @@ -821,8 +1646,14 @@ define <4 x i32> @stack_fold_psignd(<4 x
>  declare <4 x i32> @llvm.x86.ssse3.psign.d.128(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <8 x i16> @stack_fold_psignw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psignw
> -  ;CHECK:       vpsignw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psignw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsignw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -830,8 +1661,14 @@ define <8 x i16> @stack_fold_psignw(<8 x
>  declare <8 x i16> @llvm.x86.ssse3.psign.w.128(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <4 x i32> @stack_fold_pslld(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pslld
> -  ;CHECK:       vpslld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pslld:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpslld {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1)
>    ret <4 x i32> %2
> @@ -839,8 +1676,14 @@ define <4 x i32> @stack_fold_pslld(<4 x
>  declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <2 x i64> @stack_fold_psllq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psllq
> -  ;CHECK:       vpsllq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psllq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsllq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
>    ret <2 x i64> %2
> @@ -848,8 +1691,14 @@ define <2 x i64> @stack_fold_psllq(<2 x
>  declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
>
>  define <8 x i16> @stack_fold_psllw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psllw
> -  ;CHECK:       vpsllw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psllw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsllw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -857,8 +1706,14 @@ define <8 x i16> @stack_fold_psllw(<8 x
>  declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <4 x i32> @stack_fold_psrad(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrad
> -  ;CHECK:       vpsrad {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrad:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrad {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1)
>    ret <4 x i32> %2
> @@ -866,8 +1721,14 @@ define <4 x i32> @stack_fold_psrad(<4 x
>  declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <8 x i16> @stack_fold_psraw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psraw
> -  ;CHECK:       vpsraw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psraw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsraw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -875,8 +1736,14 @@ define <8 x i16> @stack_fold_psraw(<8 x
>  declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <4 x i32> @stack_fold_psrld(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrld
> -  ;CHECK:       vpsrld {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrld:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrld {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1)
>    ret <4 x i32> %2
> @@ -884,8 +1751,14 @@ define <4 x i32> @stack_fold_psrld(<4 x
>  declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <2 x i64> @stack_fold_psrlq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrlq
> -  ;CHECK:       vpsrlq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrlq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrlq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
>    ret <2 x i64> %2
> @@ -893,8 +1766,14 @@ define <2 x i64> @stack_fold_psrlq(<2 x
>  declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
>
>  define <8 x i16> @stack_fold_psrlw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrlw
> -  ;CHECK:       vpsrlw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrlw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrlw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -902,32 +1781,56 @@ define <8 x i16> @stack_fold_psrlw(<8 x
>  declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <16 x i8> @stack_fold_psubb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubb
> -  ;CHECK:       vpsubb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sub <16 x i8> %a0, %a1
>    ret <16 x i8> %2
>  }
>
>  define <4 x i32> @stack_fold_psubd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubd
> -  ;CHECK:       vpsubd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sub <4 x i32> %a0, %a1
>    ret <4 x i32> %2
>  }
>
>  define <2 x i64> @stack_fold_psubq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubq
> -  ;CHECK:       vpsubq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sub <2 x i64> %a0, %a1
>    ret <2 x i64> %2
>  }
>
>  define <16 x i8> @stack_fold_psubsb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubsb
> -  ;CHECK:       vpsubsb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubsb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
>    ret <16 x i8> %2
> @@ -935,8 +1838,14 @@ define <16 x i8> @stack_fold_psubsb(<16
>  declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
>
>  define <8 x i16> @stack_fold_psubsw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubsw
> -  ;CHECK:       vpsubsw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubsw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -944,8 +1853,14 @@ define <8 x i16> @stack_fold_psubsw(<8 x
>  declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <16 x i8> @stack_fold_psubusb(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubusb
> -  ;CHECK:       vpsubusb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubusb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubusb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1)
>    ret <16 x i8> %2
> @@ -953,8 +1868,14 @@ define <16 x i8> @stack_fold_psubusb(<16
>  declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
>
>  define <8 x i16> @stack_fold_psubusw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubusw
> -  ;CHECK:       vpsubusw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubusw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubusw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1)
>    ret <8 x i16> %2
> @@ -962,16 +1883,30 @@ define <8 x i16> @stack_fold_psubusw(<8
>  declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i16> @stack_fold_psubw(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubw
> -  ;CHECK:       vpsubw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sub <8 x i16> %a0, %a1
>    ret <8 x i16> %2
>  }
>
>  define i32 @stack_fold_ptest(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_ptest
> -  ;CHECK:       vptest {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ptest:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    vptest {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1)
>    ret i32 %2
> @@ -979,8 +1914,17 @@ define i32 @stack_fold_ptest(<2 x i64> %
>  declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
>
>  define i32 @stack_fold_ptest_ymm(<4 x i64> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_ptest_ymm
> -  ;CHECK:       vptest {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_ptest_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    xorl %eax, %eax
> +; CHECK-NEXT:    vptest {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    setb %al
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a0, <4 x i64> %a1)
>    ret i32 %2
> @@ -988,16 +1932,32 @@ define i32 @stack_fold_ptest_ymm(<4 x i6
>  declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone
>
>  define <16 x i8> @stack_fold_punpckhbw(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckhbw
> -  ;CHECK:       vpunpckhbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckhbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[8],mem[8],xmm0[9],mem[9],xmm0[10],mem[10],xmm0[11],mem[11],xmm0[12],mem[12],xmm0[13],mem[13],xmm0[14],mem[14],xmm0[15],mem[15]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
>    ret <16 x i8> %2
>  }
>
>  define <4 x i32> @stack_fold_punpckhdq(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckhdq
> -  ;CHECK:       vpunpckhdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckhdq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[2],mem[2],xmm0[3],mem[3]
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
>    ; add forces execution domain
> @@ -1006,8 +1966,17 @@ define <4 x i32> @stack_fold_punpckhdq(<
>  }
>
>  define <2 x i64> @stack_fold_punpckhqdq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckhqdq
> -  ;CHECK:       vpunpckhqdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckhqdq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[1],mem[1]
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
>    ; add forces execution domain
> @@ -1016,24 +1985,47 @@ define <2 x i64> @stack_fold_punpckhqdq(
>  }
>
>  define <8 x i16> @stack_fold_punpckhwd(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckhwd
> -  ;CHECK:       vpunpckhwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckhwd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
>    ret <8 x i16> %2
>  }
>
>  define <16 x i8> @stack_fold_punpcklbw(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpcklbw
> -  ;CHECK:       vpunpcklbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpcklbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
>    ret <16 x i8> %2
>  }
>
>  define <4 x i32> @stack_fold_punpckldq(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckldq
> -  ;CHECK:       vpunpckldq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckldq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
>    ; add forces execution domain
> @@ -1042,8 +2034,17 @@ define <4 x i32> @stack_fold_punpckldq(<
>  }
>
>  define <2 x i64> @stack_fold_punpcklqdq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpcklqdq
> -  ;CHECK:       vpunpcklqdq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpcklqdq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
>    ; add forces execution domain
> @@ -1052,16 +2053,31 @@ define <2 x i64> @stack_fold_punpcklqdq(
>  }
>
>  define <8 x i16> @stack_fold_punpcklwd(<8 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpcklwd
> -  ;CHECK:       vpunpcklwd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpcklwd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i16> %a0, <8 x i16> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
>    ret <8 x i16> %2
>  }
>
>  define <16 x i8> @stack_fold_pxor(<16 x i8> %a0, <16 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pxor
> -  ;CHECK:       vpxor {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pxor:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpxor {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = xor <16 x i8> %a0, %a1
>    ; add forces execution domain
>
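(For readers skimming the diff: every one of these tests follows the same
pattern, visible in the hunks above. An inline-asm "nop" clobbers all but
one or two XMM/YMM registers, which forces one operand to be spilled to the
stack across the asm block; FileCheck then verifies that the reload is
folded straight into the memory operand of the instruction under test,
rather than being reloaded into a register first. A minimal sketch of the
pattern -- @stack_fold_example is a hypothetical name for illustration, not
a test from this patch:

  define <4 x i32> @stack_fold_example(<4 x i32> %a0, <4 x i32> %a1) {
    ; Clobbering xmm2-xmm15 leaves no free register for %a1, so it is
    ; spilled to the stack across the asm block.
    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    ; Should compile to a single vpaddd with a stack memory operand,
    ; i.e. a "16-byte Folded Reload" in the CHECK lines.
    %2 = add <4 x i32> %a0, %a1
    ret <4 x i32> %2
  }

Per the NOTE line added at the top of each file, the new CHECK blocks were
regenerated mechanically, presumably with something like
`llvm/utils/update_llc_test_checks.py <test>.ll` (the script reads the RUN
line to pick the llc invocation), rather than written by hand -- hence the
exhaustive spill/nop/reload assertions replacing the old one-line checks.)
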
> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll?rev=369876&r1=369875&r2=369876&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll Sun Aug 25 13:48:14 2019
> @@ -1,3 +1,4 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx2 < %s | FileCheck %s
>
>  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> @@ -9,8 +10,16 @@ target triple = "x86_64-unknown-unknown"
>  ; relevant registers and check that the reload is correctly folded into the instruction.
>
>  define <4 x double> @stack_fold_broadcastsd_ymm(<2 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_broadcastsd_ymm
> -  ;CHECK:       vbroadcastsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_broadcastsd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vbroadcastsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x double> %a0, <2 x double> undef, <4 x i32> zeroinitializer
>    ; fadd forces execution domain
> @@ -19,8 +28,16 @@ define <4 x double> @stack_fold_broadcas
>  }
>
>  define <4 x float> @stack_fold_broadcastss(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_broadcastss
> -  ;CHECK:       vbroadcastss {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_broadcastss:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vbroadcastss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
> +; CHECK-NEXT:    vaddps %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> zeroinitializer
>    ; fadd forces execution domain
> @@ -29,8 +46,16 @@ define <4 x float> @stack_fold_broadcast
>  }
>
>  define <8 x float> @stack_fold_broadcastss_ymm(<4 x float> %a0) {
> -  ;CHECK-LABEL: stack_fold_broadcastss_ymm
> -  ;CHECK:       vbroadcastss {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_broadcastss_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vbroadcastss {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
> +; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x float> %a0, <4 x float> undef, <8 x i32> zeroinitializer
>    ; fadd forces execution domain
> @@ -39,8 +64,16 @@ define <8 x float> @stack_fold_broadcast
>  }
>
>  define <4 x i32> @stack_fold_extracti128(<8 x i16> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_extracti128
> -  ;CHECK:       vextracti128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
> +; CHECK-LABEL: stack_fold_extracti128:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
> +; CHECK-NEXT:    vextracti128 $1, %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Folded Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
> +; CHECK-NEXT:    vzeroupper
> +; CHECK-NEXT:    retq
>    ; zext forces execution domain
>    %t1 = zext <8 x i16> %a0 to <8 x i32>
>    %t2 = shufflevector <8 x i32> %t1, <8 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
> @@ -49,8 +82,16 @@ define <4 x i32> @stack_fold_extracti128
>  }
>
>  define <8 x i32> @stack_fold_inserti128(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_inserti128
> -  ;CHECK:       vinserti128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_inserti128:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
>    ; add forces execution domain
> @@ -59,8 +100,14 @@ define <8 x i32> @stack_fold_inserti128(
>  }
>
>  define <16 x i16> @stack_fold_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_mpsadbw
> -  ;CHECK:       vmpsadbw $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_mpsadbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vmpsadbw $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7)
>    ret <16 x i16> %2
> @@ -68,8 +115,14 @@ define <16 x i16> @stack_fold_mpsadbw(<3
>  declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind readnone
>
>  define <32 x i8> @stack_fold_pabsb(<32 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pabsb
> -  ;CHECK:       vpabsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pabsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpabsb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <32 x i8> %a0, zeroinitializer
>    %3 = sub <32 x i8> zeroinitializer, %a0
> @@ -78,8 +131,14 @@ define <32 x i8> @stack_fold_pabsb(<32 x
>  }
>
>  define <8 x i32> @stack_fold_pabsd(<8 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_pabsd
> -  ;CHECK:       vpabsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pabsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <8 x i32> %a0, zeroinitializer
>    %3 = sub <8 x i32> zeroinitializer, %a0
> @@ -88,8 +147,14 @@ define <8 x i32> @stack_fold_pabsd(<8 x
>  }
>
>  define <16 x i16> @stack_fold_pabsw(<16 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pabsw
> -  ;CHECK:       vpabsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pabsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpabsw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <16 x i16> %a0, zeroinitializer
>    %3 = sub <16 x i16> zeroinitializer, %a0
> @@ -98,8 +163,14 @@ define <16 x i16> @stack_fold_pabsw(<16
>  }
>
>  define <16 x i16> @stack_fold_packssdw(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_packssdw
> -  ;CHECK:       vpackssdw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_packssdw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpackssdw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1)
>    ret <16 x i16> %2
> @@ -107,8 +178,14 @@ define <16 x i16> @stack_fold_packssdw(<
>  declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readnone
>
>  define <32 x i8> @stack_fold_packsswb(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_packsswb
> -  ;CHECK:       vpacksswb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_packsswb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpacksswb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)
>    ret <32 x i8> %2
> @@ -116,8 +193,14 @@ define <32 x i8> @stack_fold_packsswb(<1
>  declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <16 x i16> @stack_fold_packusdw(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_packusdw
> -  ;CHECK:       vpackusdw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_packusdw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpackusdw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1)
>    ret <16 x i16> %2
> @@ -125,8 +208,14 @@ define <16 x i16> @stack_fold_packusdw(<
>  declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readnone
>
>  define <32 x i8> @stack_fold_packuswb(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_packuswb
> -  ;CHECK:       vpackuswb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_packuswb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpackuswb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1)
>    ret <32 x i8> %2
> @@ -134,32 +223,56 @@ define <32 x i8> @stack_fold_packuswb(<1
>  declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <32 x i8> @stack_fold_paddb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddb
> -  ;CHECK:       vpaddb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = add <32 x i8> %a0, %a1
>    ret <32 x i8> %2
>  }
>
>  define <8 x i32> @stack_fold_paddd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddd
> -  ;CHECK:       vpaddd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = add <8 x i32> %a0, %a1
>    ret <8 x i32> %2
>  }
>
>  define <4 x i64> @stack_fold_paddq(<4 x i64> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddq
> -  ;CHECK:       vpaddq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = add <4 x i64> %a0, %a1
>    ret <4 x i64> %2
>  }
>
>  define <32 x i8> @stack_fold_paddsb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddsb
> -  ;CHECK:       vpaddsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddsb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
>    ret <32 x i8> %2
> @@ -167,8 +280,14 @@ define <32 x i8> @stack_fold_paddsb(<32
>  declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
>
>  define <16 x i16> @stack_fold_paddsw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddsw
> -  ;CHECK:       vpaddsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddsw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -176,8 +295,14 @@ define <16 x i16> @stack_fold_paddsw(<16
>  declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <32 x i8> @stack_fold_paddusb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddusb
> -  ;CHECK:       vpaddusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddusb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddusb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
>    ret <32 x i8> %2
> @@ -185,8 +310,14 @@ define <32 x i8> @stack_fold_paddusb(<32
>  declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
>
>  define <16 x i16> @stack_fold_paddusw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddusw
> -  ;CHECK:       vpaddusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddusw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddusw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -194,24 +325,44 @@ define <16 x i16> @stack_fold_paddusw(<1
>  declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <16 x i16> @stack_fold_paddw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_paddw
> -  ;CHECK:       vpaddw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_paddw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpaddw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = add <16 x i16> %a0, %a1
>    ret <16 x i16> %2
>  }
>
>  define <32 x i8> @stack_fold_palignr(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_palignr
> -  ;CHECK:       vpalignr $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_palignr:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpalignr $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <32 x i8> %a1, <32 x i8> %a0, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
>    ret <32 x i8> %2
>  }
>
>  define <32 x i8> @stack_fold_pand(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pand
> -  ;CHECK:       vpand {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pand:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpand {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vpaddb {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = and <32 x i8> %a0, %a1
>    ; add forces execution domain
> @@ -220,8 +371,15 @@ define <32 x i8> @stack_fold_pand(<32 x
>  }
>
>  define <32 x i8> @stack_fold_pandn(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pandn
> -  ;CHECK:       vpandn {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pandn:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpandn {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vpaddb {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = xor <32 x i8> %a0, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
>    %3 = and <32 x i8> %2, %a1
> @@ -231,8 +389,14 @@ define <32 x i8> @stack_fold_pandn(<32 x
>  }
>
>  define <32 x i8> @stack_fold_pavgb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pavgb
> -  ;CHECK:       vpavgb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pavgb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpavgb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = zext <32 x i8> %a0 to <32 x i16>
>    %3 = zext <32 x i8> %a1 to <32 x i16>
> @@ -244,8 +408,14 @@ define <32 x i8> @stack_fold_pavgb(<32 x
>  }
>
>  define <16 x i16> @stack_fold_pavgw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pavgw
> -  ;CHECK:       vpavgw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pavgw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpavgw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = zext <16 x i16> %a0 to <16 x i32>
>    %3 = zext <16 x i16> %a1 to <16 x i32>
> @@ -257,8 +427,17 @@ define <16 x i16> @stack_fold_pavgw(<16
>  }
>
>  define <4 x i32> @stack_fold_pblendd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pblendd
> -  ;CHECK:       vpblendd $7, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pblendd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpblendd $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # xmm0 = mem[0,1,2],xmm0[3]
> +; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 3>
>    ; add forces execution domain
> @@ -267,8 +446,16 @@ define <4 x i32> @stack_fold_pblendd(<4
>  }
>
>  define <8 x i32> @stack_fold_pblendd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pblendd_ymm
> -  ;CHECK:       vpblendd $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pblendd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpblendd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0,1,2],ymm0[3,4,5,6,7]
> +; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
>    ; add forces execution domain
> @@ -277,8 +464,14 @@ define <8 x i32> @stack_fold_pblendd_ymm
>  }
>
>  define <32 x i8> @stack_fold_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %c) {
> -  ;CHECK-LABEL: stack_fold_pblendvb
> -  ;CHECK:       vpblendvb {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pblendvb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpblendvb %ymm0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a1, <32 x i8> %c, <32 x i8> %a0)
>    ret <32 x i8> %2
> @@ -286,32 +479,58 @@ define <32 x i8> @stack_fold_pblendvb(<3
>  declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounwind readnone
>
>  define <16 x i16> @stack_fold_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pblendw
> -  ;CHECK:       vpblendw $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pblendw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpblendw $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0,1,2],ymm0[3,4,5,6,7],mem[8,9,10],ymm0[11,12,13,14,15]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 15>
>    ret <16 x i16> %2
>  }
>
>  define <16 x i8> @stack_fold_pbroadcastb(<16 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pbroadcastb
> -  ;CHECK:       vpbroadcastb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pbroadcastb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpbroadcastb {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> zeroinitializer
>    ret <16 x i8> %2
>  }
>
>  define <32 x i8> @stack_fold_pbroadcastb_ymm(<16 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pbroadcastb_ymm
> -  ;CHECK:       vpbroadcastb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pbroadcastb_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpbroadcastb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <32 x i32> zeroinitializer
>    ret <32 x i8> %2
>  }
>
>  define <4 x i32> @stack_fold_pbroadcastd(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_pbroadcastd
> -  ;CHECK:       vpbroadcastd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pbroadcastd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> zeroinitializer
>    ; add forces execution domain
> @@ -320,8 +539,15 @@ define <4 x i32> @stack_fold_pbroadcastd
>  }
>
>  define <8 x i32> @stack_fold_pbroadcastd_ymm(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_pbroadcastd_ymm
> -  ;CHECK:       vpbroadcastd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pbroadcastd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpbroadcastd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i32> %a0, <4 x i32> undef, <8 x i32> zeroinitializer
>    ; add forces execution domain
> @@ -330,8 +556,15 @@ define <8 x i32> @stack_fold_pbroadcastd
>  }
>
>  define <2 x i64> @stack_fold_pbroadcastq(<2 x i64> %a0) {
> -  ;CHECK-LABEL: stack_fold_pbroadcastq
> -  ;CHECK:       vpbroadcastq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pbroadcastq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
>    ; add forces execution domain
> @@ -340,8 +573,15 @@ define <2 x i64> @stack_fold_pbroadcastq
>  }
>
>  define <4 x i64> @stack_fold_pbroadcastq_ymm(<2 x i64> %a0) {
> -  ;CHECK-LABEL: stack_fold_pbroadcastq_ymm
> -  ;CHECK:       vpbroadcastq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pbroadcastq_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpbroadcastq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> zeroinitializer
>    ; add forces execution domain
> @@ -350,24 +590,42 @@ define <4 x i64> @stack_fold_pbroadcastq
>  }
>
>  define <8 x i16> @stack_fold_pbroadcastw(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pbroadcastw
> -  ;CHECK:       vpbroadcastw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pbroadcastw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpbroadcastw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> zeroinitializer
>    ret <8 x i16> %2
>  }
>
>  define <16 x i16> @stack_fold_pbroadcastw_ymm(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pbroadcastw_ymm
> -  ;CHECK:       vpbroadcastw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pbroadcastw_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpbroadcastw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <16 x i32> zeroinitializer
>    ret <16 x i16> %2
>  }
>
>  define <32 x i8> @stack_fold_pcmpeqb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpeqb
> -  ;CHECK:       vpcmpeqb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpeqb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpeqb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp eq <32 x i8> %a0, %a1
>    %3 = sext <32 x i1> %2 to <32 x i8>
> @@ -375,8 +633,14 @@ define <32 x i8> @stack_fold_pcmpeqb(<32
>  }
>
>  define <8 x i32> @stack_fold_pcmpeqd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpeqd
> -  ;CHECK:       vpcmpeqd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpeqd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpeqd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp eq <8 x i32> %a0, %a1
>    %3 = sext <8 x i1> %2 to <8 x i32>
> @@ -384,8 +648,14 @@ define <8 x i32> @stack_fold_pcmpeqd(<8
>  }
>
>  define <4 x i64> @stack_fold_pcmpeqq(<4 x i64> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpeqq
> -  ;CHECK:       vpcmpeqq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpeqq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpeqq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp eq <4 x i64> %a0, %a1
>    %3 = sext <4 x i1> %2 to <4 x i64>
> @@ -393,8 +663,14 @@ define <4 x i64> @stack_fold_pcmpeqq(<4
>  }
>
>  define <16 x i16> @stack_fold_pcmpeqw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpeqw
> -  ;CHECK:       vpcmpeqw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpeqw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpeqw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp eq <16 x i16> %a0, %a1
>    %3 = sext <16 x i1> %2 to <16 x i16>
> @@ -402,8 +678,14 @@ define <16 x i16> @stack_fold_pcmpeqw(<1
>  }
>
>  define <32 x i8> @stack_fold_pcmpgtb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpgtb
> -  ;CHECK:       vpcmpgtb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpgtb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpgtb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <32 x i8> %a0, %a1
>    %3 = sext <32 x i1> %2 to <32 x i8>
> @@ -411,8 +693,14 @@ define <32 x i8> @stack_fold_pcmpgtb(<32
>  }
>
>  define <8 x i32> @stack_fold_pcmpgtd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpgtd
> -  ;CHECK:       vpcmpgtd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpgtd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpgtd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <8 x i32> %a0, %a1
>    %3 = sext <8 x i1> %2 to <8 x i32>
> @@ -420,8 +708,14 @@ define <8 x i32> @stack_fold_pcmpgtd(<8
>  }
>
>  define <4 x i64> @stack_fold_pcmpgtq(<4 x i64> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpgtq
> -  ;CHECK:       vpcmpgtq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpgtq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpgtq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <4 x i64> %a0, %a1
>    %3 = sext <4 x i1> %2 to <4 x i64>
> @@ -429,8 +723,14 @@ define <4 x i64> @stack_fold_pcmpgtq(<4
>  }
>
>  define <16 x i16> @stack_fold_pcmpgtw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pcmpgtw
> -  ;CHECK:       vpcmpgtw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pcmpgtw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpcmpgtw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <16 x i16> %a0, %a1
>    %3 = sext <16 x i1> %2 to <16 x i16>
> @@ -438,8 +738,16 @@ define <16 x i16> @stack_fold_pcmpgtw(<1
>  }
>
>  define <8 x i32> @stack_fold_perm2i128(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_perm2i128
> -  ;CHECK:   vperm2i128 $33, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_perm2i128:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vperm2i128 $33, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[2,3],mem[0,1]
> +; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
>    ; add forces execution domain
> @@ -448,8 +756,15 @@ define <8 x i32> @stack_fold_perm2i128(<
>  }
>
>  define <8 x i32> @stack_fold_permd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_permd
> -  ;CHECK:   vpermd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a1, <8 x i32> %a0)
>    ; add forces execution domain
> @@ -459,8 +774,17 @@ define <8 x i32> @stack_fold_permd(<8 x
>  declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
>
>  define <4 x double> @stack_fold_permpd(<4 x double> %a0) {
> -  ;CHECK-LABEL: stack_fold_permpd
> -  ;CHECK:   vpermpd $235, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permpd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermpd $235, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[3,2,2,3]
> +; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
> +; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 3>
>    ; fadd forces execution domain
> @@ -469,8 +793,14 @@ define <4 x double> @stack_fold_permpd(<
>  }
>
>  define <8 x float> @stack_fold_permps(<8 x i32> %a0, <8 x float> %a1) {
> -  ;CHECK-LABEL: stack_fold_permps
> -  ;CHECK:       vpermps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permps:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a1, <8 x i32> %a0)
>    ret <8 x float> %2
> @@ -478,8 +808,16 @@ define <8 x float> @stack_fold_permps(<8
>  declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind readonly
>
>  define <4 x i64> @stack_fold_permq(<4 x i64> %a0) {
> -  ;CHECK-LABEL: stack_fold_permq
> -  ;CHECK:   vpermq $235, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_permq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpermq $235, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[3,2,2,3]
> +; CHECK-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 3>
>    ; add forces execution domain
> @@ -488,8 +826,14 @@ define <4 x i64> @stack_fold_permq(<4 x
>  }
>
>  define <8 x i32> @stack_fold_phaddd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_phaddd
> -  ;CHECK:       vphaddd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phaddd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1)
>    ret <8 x i32> %2
> @@ -497,8 +841,14 @@ define <8 x i32> @stack_fold_phaddd(<8 x
>  declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone
>
>  define <16 x i16> @stack_fold_phaddsw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_phaddsw
> -  ;CHECK:       vphaddsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phaddsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphaddsw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -506,8 +856,14 @@ define <16 x i16> @stack_fold_phaddsw(<1
>  declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <16 x i16> @stack_fold_phaddw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_phaddw
> -  ;CHECK:       vphaddw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phaddw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphaddw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -515,8 +871,14 @@ define <16 x i16> @stack_fold_phaddw(<16
>  declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <8 x i32> @stack_fold_phsubd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_phsubd
> -  ;CHECK:       vphsubd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phsubd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphsubd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1)
>    ret <8 x i32> %2
> @@ -524,8 +886,14 @@ define <8 x i32> @stack_fold_phsubd(<8 x
>  declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone
>
>  define <16 x i16> @stack_fold_phsubsw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_phsubsw
> -  ;CHECK:       vphsubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phsubsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphsubsw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -533,8 +901,14 @@ define <16 x i16> @stack_fold_phsubsw(<1
>  declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <16 x i16> @stack_fold_phsubw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_phsubw
> -  ;CHECK:       vphsubw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_phsubw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vphsubw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -542,8 +916,14 @@ define <16 x i16> @stack_fold_phsubw(<16
>  declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <16 x i16> @stack_fold_pmaddubsw(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaddubsw
> -  ;CHECK:       vpmaddubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaddubsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaddubsw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1)
>    ret <16 x i16> %2
> @@ -551,8 +931,14 @@ define <16 x i16> @stack_fold_pmaddubsw(
>  declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind readnone
>
>  define <8 x i32> @stack_fold_pmaddwd(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaddwd
> -  ;CHECK:       vpmaddwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaddwd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaddwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1)
>    ret <8 x i32> %2
> @@ -560,8 +946,14 @@ define <8 x i32> @stack_fold_pmaddwd(<16
>  declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <32 x i8> @stack_fold_pmaxsb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxsb
> -  ;CHECK:       vpmaxsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxsb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <32 x i8> %a0, %a1
>    %3 = select <32 x i1> %2, <32 x i8> %a0, <32 x i8> %a1
> @@ -569,8 +961,14 @@ define <32 x i8> @stack_fold_pmaxsb(<32
>  }
>
>  define <8 x i32> @stack_fold_pmaxsd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxsd
> -  ;CHECK:       vpmaxsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <8 x i32> %a0, %a1
>    %3 = select <8 x i1> %2, <8 x i32> %a0, <8 x i32> %a1
> @@ -578,8 +976,14 @@ define <8 x i32> @stack_fold_pmaxsd(<8 x
>  }
>
>  define <16 x i16> @stack_fold_pmaxsw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxsw
> -  ;CHECK:       vpmaxsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxsw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp sgt <16 x i16> %a0, %a1
>    %3 = select <16 x i1> %2, <16 x i16> %a0, <16 x i16> %a1
> @@ -587,8 +991,14 @@ define <16 x i16> @stack_fold_pmaxsw(<16
>  }
>
>  define <32 x i8> @stack_fold_pmaxub(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxub
> -  ;CHECK:       vpmaxub {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxub:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxub {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ugt <32 x i8> %a0, %a1
>    %3 = select <32 x i1> %2, <32 x i8> %a0, <32 x i8> %a1
> @@ -596,8 +1006,14 @@ define <32 x i8> @stack_fold_pmaxub(<32
>  }
>
>  define <8 x i32> @stack_fold_pmaxud(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxud
> -  ;CHECK:       vpmaxud {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxud:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxud {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ugt <8 x i32> %a0, %a1
>    %3 = select <8 x i1> %2, <8 x i32> %a0, <8 x i32> %a1
> @@ -605,8 +1021,14 @@ define <8 x i32> @stack_fold_pmaxud(<8 x
>  }
>
>  define <16 x i16> @stack_fold_pmaxuw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmaxuw
> -  ;CHECK:       vpmaxuw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmaxuw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmaxuw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ugt <16 x i16> %a0, %a1
>    %3 = select <16 x i1> %2, <16 x i16> %a0, <16 x i16> %a1
> @@ -614,8 +1036,14 @@ define <16 x i16> @stack_fold_pmaxuw(<16
>  }
>
>  define <32 x i8> @stack_fold_pminsb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminsb
> -  ;CHECK:       vpminsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminsb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp slt <32 x i8> %a0, %a1
>    %3 = select <32 x i1> %2, <32 x i8> %a0, <32 x i8> %a1
> @@ -623,8 +1051,14 @@ define <32 x i8> @stack_fold_pminsb(<32
>  }
>
>  define <8 x i32> @stack_fold_pminsd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminsd
> -  ;CHECK:       vpminsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminsd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp slt <8 x i32> %a0, %a1
>    %3 = select <8 x i1> %2, <8 x i32> %a0, <8 x i32> %a1
> @@ -632,8 +1066,14 @@ define <8 x i32> @stack_fold_pminsd(<8 x
>  }
>
>  define <16 x i16> @stack_fold_pminsw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminsw
> -  ;CHECK:       vpminsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminsw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp slt <16 x i16> %a0, %a1
>    %3 = select <16 x i1> %2, <16 x i16> %a0, <16 x i16> %a1
> @@ -641,8 +1081,14 @@ define <16 x i16> @stack_fold_pminsw(<16
>  }
>
>  define <32 x i8> @stack_fold_pminub(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminub
> -  ;CHECK:       vpminub {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminub:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminub {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ult <32 x i8> %a0, %a1
>    %3 = select <32 x i1> %2, <32 x i8> %a0, <32 x i8> %a1
> @@ -650,8 +1096,14 @@ define <32 x i8> @stack_fold_pminub(<32
>  }
>
>  define <8 x i32> @stack_fold_pminud(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminud
> -  ;CHECK:       vpminud {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminud:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminud {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ult <8 x i32> %a0, %a1
>    %3 = select <8 x i1> %2, <8 x i32> %a0, <8 x i32> %a1
> @@ -659,8 +1111,14 @@ define <8 x i32> @stack_fold_pminud(<8 x
>  }
>
>  define <16 x i16> @stack_fold_pminuw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pminuw
> -  ;CHECK:       vpminuw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pminuw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpminuw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = icmp ult <16 x i16> %a0, %a1
>    %3 = select <16 x i1> %2, <16 x i16> %a0, <16 x i16> %a1
> @@ -668,8 +1126,14 @@ define <16 x i16> @stack_fold_pminuw(<16
>  }
>
>  define <8 x i32> @stack_fold_pmovsxbd(<16 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovsxbd
> -  ;CHECK:       vpmovsxbd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovsxbd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovsxbd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
>    %3 = sext <8 x i8> %2 to <8 x i32>
> @@ -677,8 +1141,14 @@ define <8 x i32> @stack_fold_pmovsxbd(<1
>  }
>
>  define <4 x i64> @stack_fold_pmovsxbq(<16 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovsxbq
> -  ;CHECK:       pmovsxbq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovsxbq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovsxbq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
>    %3 = sext <4 x i8> %2 to <4 x i64>
> @@ -686,32 +1156,56 @@ define <4 x i64> @stack_fold_pmovsxbq(<1
>  }
>
>  define <16 x i16> @stack_fold_pmovsxbw(<16 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovsxbw
> -  ;CHECK:       vpmovsxbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovsxbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovsxbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sext <16 x i8> %a0 to <16 x i16>
>    ret <16 x i16> %2
>  }
>
>  define <4 x i64> @stack_fold_pmovsxdq(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovsxdq
> -  ;CHECK:       vpmovsxdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovsxdq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovsxdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sext <4 x i32> %a0 to <4 x i64>
>    ret <4 x i64> %2
>  }
>
>  define <8 x i32> @stack_fold_pmovsxwd(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovsxwd
> -  ;CHECK:       vpmovsxwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovsxwd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovsxwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sext <8 x i16> %a0 to <8 x i32>
>    ret <8 x i32> %2
>  }
>
>  define <4 x i64> @stack_fold_pmovsxwq(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovsxwq
> -  ;CHECK:       vpmovsxwq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovsxwq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovsxwq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
>    %3 = sext <4 x i16> %2 to <4 x i64>
> @@ -719,8 +1213,15 @@ define <4 x i64> @stack_fold_pmovsxwq(<8
>  }
>
>  define <8 x i32> @stack_fold_pmovzxbd(<16 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovzxbd
> -  ;CHECK:       vpmovzxbd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovzxbd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovzxbd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
>    %3 = zext <8 x i8> %2 to <8 x i32>
> @@ -728,8 +1229,15 @@ define <8 x i32> @stack_fold_pmovzxbd(<1
>  }
>
>  define <4 x i64> @stack_fold_pmovzxbq(<16 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovzxbq
> -  ;CHECK:       vpmovzxbq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovzxbq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovzxbq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
>    %3 = zext <4 x i8> %2 to <4 x i64>
> @@ -737,32 +1245,60 @@ define <4 x i64> @stack_fold_pmovzxbq(<1
>  }
>
>  define <16 x i16> @stack_fold_pmovzxbw(<16 x i8> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovzxbw
> -  ;CHECK:       vpmovzxbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovzxbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovzxbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = zext <16 x i8> %a0 to <16 x i16>
>    ret <16 x i16> %2
>  }
>
>  define <4 x i64> @stack_fold_pmovzxdq(<4 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovzxdq
> -  ;CHECK:       vpmovzxdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovzxdq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovzxdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = zext <4 x i32> %a0 to <4 x i64>
>    ret <4 x i64> %2
>  }
>
>  define <8 x i32> @stack_fold_pmovzxwd(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovzxwd
> -  ;CHECK:       vpmovzxwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovzxwd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = zext <8 x i16> %a0 to <8 x i32>
>    ret <8 x i32> %2
>  }
>
>  define <4 x i64> @stack_fold_pmovzxwq(<8 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_pmovzxwq
> -  ;CHECK:       vpmovzxwq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmovzxwq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmovzxwq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
>    %3 = zext <4 x i16> %2 to <4 x i64>
> @@ -770,8 +1306,14 @@ define <4 x i64> @stack_fold_pmovzxwq(<8
>  }
>
>  define <4 x i64> @stack_fold_pmuldq(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmuldq
> -  ;CHECK:       vpmuldq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmuldq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmuldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <8 x i32> %a0 to <4 x i64>
>    %3 = bitcast <8 x i32> %a1 to <4 x i64>
> @@ -784,8 +1326,14 @@ define <4 x i64> @stack_fold_pmuldq(<8 x
>  }
>
>  define <16 x i16> @stack_fold_pmulhrsw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmulhrsw
> -  ;CHECK:       vpmulhrsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmulhrsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmulhrsw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -793,8 +1341,14 @@ define <16 x i16> @stack_fold_pmulhrsw(<
>  declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <16 x i16> @stack_fold_pmulhuw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmulhuw
> -  ;CHECK:       vpmulhuw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmulhuw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmulhuw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -802,8 +1356,14 @@ define <16 x i16> @stack_fold_pmulhuw(<1
>  declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <16 x i16> @stack_fold_pmulhw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmulhw
> -  ;CHECK:       vpmulhw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmulhw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmulhw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -811,24 +1371,42 @@ define <16 x i16> @stack_fold_pmulhw(<16
>  declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <8 x i32> @stack_fold_pmulld(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmulld
> -  ;CHECK:       vpmulld {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmulld:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmulld {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = mul <8 x i32> %a0, %a1
>    ret <8 x i32> %2
>  }
>
>  define <16 x i16> @stack_fold_pmullw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmullw
> -  ;CHECK:       vpmullw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmullw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmullw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = mul <16 x i16> %a0, %a1
>    ret <16 x i16> %2
>  }
>
>  define <4 x i64> @stack_fold_pmuludq(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pmuludq
> -  ;CHECK:       vpmuludq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pmuludq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpmuludq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = bitcast <8 x i32> %a0 to <4 x i64>
>    %3 = bitcast <8 x i32> %a1 to <4 x i64>
> @@ -839,8 +1417,15 @@ define <4 x i64> @stack_fold_pmuludq(<8
>  }
>
>  define <32 x i8> @stack_fold_por(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_por
> -  ;CHECK:       vpor {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_por:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpor {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vpaddb {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = or <32 x i8> %a0, %a1
>    ; add forces execution domain
> @@ -849,8 +1434,14 @@ define <32 x i8> @stack_fold_por(<32 x i
>  }
>
>  define <4 x i64> @stack_fold_psadbw(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psadbw
> -  ;CHECK:       vpsadbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psadbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsadbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1)
>    ret <4 x i64> %2
> @@ -858,8 +1449,14 @@ define <4 x i64> @stack_fold_psadbw(<32
>  declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
>
>  define <32 x i8> @stack_fold_pshufb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pshufb
> -  ;CHECK:       vpshufb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pshufb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpshufb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> %a1)
>    ret <32 x i8> %2
> @@ -867,8 +1464,16 @@ define <32 x i8> @stack_fold_pshufb(<32
>  declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
>
>  define <8 x i32> @stack_fold_pshufd(<8 x i32> %a0) {
> -  ;CHECK-LABEL: stack_fold_pshufd
> -  ;CHECK:       vpshufd $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pshufd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpshufd $27, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[3,2,1,0,7,6,5,4]
> +; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
>    ; add forces execution domain
> @@ -877,24 +1482,44 @@ define <8 x i32> @stack_fold_pshufd(<8 x
>  }
>
>  define <16 x i16> @stack_fold_vpshufhw(<16 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_vpshufhw
> -  ;CHECK:       vpshufhw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpshufhw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpshufhw $27, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 13, i32 12>
>    ret <16 x i16> %2
>  }
>
>  define <16 x i16> @stack_fold_vpshuflw(<16 x i16> %a0) {
> -  ;CHECK-LABEL: stack_fold_vpshuflw
> -  ;CHECK:       vpshuflw $27, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_vpshuflw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpshuflw $27, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = mem[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15>
>    ret <16 x i16> %2
>  }
>
>  define <32 x i8> @stack_fold_psignb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psignb
> -  ;CHECK:       vpsignb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psignb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsignb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1)
>    ret <32 x i8> %2
> @@ -902,8 +1527,14 @@ define <32 x i8> @stack_fold_psignb(<32
>  declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
>
>  define <8 x i32> @stack_fold_psignd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psignd
> -  ;CHECK:       vpsignd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psignd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsignd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1)
>    ret <8 x i32> %2
> @@ -911,8 +1542,14 @@ define <8 x i32> @stack_fold_psignd(<8 x
>  declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
>
>  define <16 x i16> @stack_fold_psignw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psignw
> -  ;CHECK:       vpsignw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psignw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsignw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -920,8 +1557,14 @@ define <16 x i16> @stack_fold_psignw(<16
>  declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <8 x i32> @stack_fold_pslld(<8 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_pslld
> -  ;CHECK:       vpslld {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pslld:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpslld {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1)
>    ret <8 x i32> %2
> @@ -929,8 +1572,14 @@ define <8 x i32> @stack_fold_pslld(<8 x
>  declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
>
>  define <4 x i64> @stack_fold_psllq(<4 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psllq
> -  ;CHECK:       vpsllq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psllq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsllq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1)
>    ret <4 x i64> %2
> @@ -938,8 +1587,14 @@ define <4 x i64> @stack_fold_psllq(<4 x
>  declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
>
>  define <4 x i32> @stack_fold_psllvd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psllvd
> -  ;CHECK:       vpsllvd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psllvd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsllvd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1)
>    ret <4 x i32> %2
> @@ -947,8 +1602,14 @@ define <4 x i32> @stack_fold_psllvd(<4 x
>  declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <8 x i32> @stack_fold_psllvd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psllvd_ymm
> -  ;CHECK:       vpsllvd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psllvd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsllvd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1)
>    ret <8 x i32> %2
> @@ -956,8 +1617,14 @@ define <8 x i32> @stack_fold_psllvd_ymm(
>  declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
>
>  define <2 x i64> @stack_fold_psllvq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psllvq
> -  ;CHECK:       vpsllvq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psllvq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsllvq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1)
>    ret <2 x i64> %2
> @@ -965,8 +1632,14 @@ define <2 x i64> @stack_fold_psllvq(<2 x
>  declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
>
>  define <4 x i64> @stack_fold_psllvq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psllvq_ymm
> -  ;CHECK:       vpsllvq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psllvq_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsllvq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1)
>    ret <4 x i64> %2
> @@ -974,8 +1647,14 @@ define <4 x i64> @stack_fold_psllvq_ymm(
>  declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
>
>  define <16 x i16> @stack_fold_psllw(<16 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psllw
> -  ;CHECK:       vpsllw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psllw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsllw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1)
>    ret <16 x i16> %2
> @@ -983,8 +1662,14 @@ define <16 x i16> @stack_fold_psllw(<16
>  declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i32> @stack_fold_psrad(<8 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrad
> -  ;CHECK:       vpsrad {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrad:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrad {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1)
>    ret <8 x i32> %2
> @@ -992,8 +1677,14 @@ define <8 x i32> @stack_fold_psrad(<8 x
>  declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
>
>  define <4 x i32> @stack_fold_psravd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psravd
> -  ;CHECK:       vpsravd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psravd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsravd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1)
>    ret <4 x i32> %2
> @@ -1001,8 +1692,14 @@ define <4 x i32> @stack_fold_psravd(<4 x
>  declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <8 x i32> @stack_fold_psravd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psravd_ymm
> -  ;CHECK:       vpsravd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psravd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsravd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1)
>    ret <8 x i32> %2
> @@ -1010,8 +1707,14 @@ define <8 x i32> @stack_fold_psravd_ymm(
>  declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind readnone
>
>  define <16 x i16> @stack_fold_psraw(<16 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psraw
> -  ;CHECK:       vpsraw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psraw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsraw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1)
>    ret <16 x i16> %2
> @@ -1019,8 +1722,14 @@ define <16 x i16> @stack_fold_psraw(<16
>  declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnone
>
>  define <8 x i32> @stack_fold_psrld(<8 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrld
> -  ;CHECK:       vpsrld {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrld:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrld {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1)
>    ret <8 x i32> %2
> @@ -1028,8 +1737,14 @@ define <8 x i32> @stack_fold_psrld(<8 x
>  declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
>
>  define <4 x i64> @stack_fold_psrlq(<4 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrlq
> -  ;CHECK:       vpsrlq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrlq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrlq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1)
>    ret <4 x i64> %2
> @@ -1037,8 +1752,14 @@ define <4 x i64> @stack_fold_psrlq(<4 x
>  declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
>
>  define <4 x i32> @stack_fold_psrlvd(<4 x i32> %a0, <4 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrlvd
> -  ;CHECK:       vpsrlvd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrlvd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrlvd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1)
>    ret <4 x i32> %2
> @@ -1046,8 +1767,14 @@ define <4 x i32> @stack_fold_psrlvd(<4 x
>  declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
>
>  define <8 x i32> @stack_fold_psrlvd_ymm(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrlvd_ymm
> -  ;CHECK:       vpsrlvd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrlvd_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrlvd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1)
>    ret <8 x i32> %2
> @@ -1055,8 +1782,14 @@ define <8 x i32> @stack_fold_psrlvd_ymm(
>  declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
>
>  define <2 x i64> @stack_fold_psrlvq(<2 x i64> %a0, <2 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrlvq
> -  ;CHECK:       vpsrlvq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrlvq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrlvq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1)
>    ret <2 x i64> %2
> @@ -1064,8 +1797,14 @@ define <2 x i64> @stack_fold_psrlvq(<2 x
>  declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
>
>  define <4 x i64> @stack_fold_psrlvq_ymm(<4 x i64> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrlvq_ymm
> -  ;CHECK:       vpsrlvq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrlvq_ymm:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrlvq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1)
>    ret <4 x i64> %2
> @@ -1073,8 +1812,14 @@ define <4 x i64> @stack_fold_psrlvq_ymm(
>  declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
>
>  define <16 x i16> @stack_fold_psrlw(<16 x i16> %a0, <8 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psrlw
> -  ;CHECK:       vpsrlw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psrlw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsrlw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1)
>    ret <16 x i16> %2
> @@ -1082,32 +1827,56 @@ define <16 x i16> @stack_fold_psrlw(<16
>  declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnone
>
>  define <32 x i8> @stack_fold_psubb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubb
> -  ;CHECK:       vpsubb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sub <32 x i8> %a0, %a1
>    ret <32 x i8> %2
>  }
>
>  define <8 x i32> @stack_fold_psubd(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubd
> -  ;CHECK:       vpsubd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sub <8 x i32> %a0, %a1
>    ret <8 x i32> %2
>  }
>
>  define <4 x i64> @stack_fold_psubq(<4 x i64> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubq
> -  ;CHECK:       vpsubq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sub <4 x i64> %a0, %a1
>    ret <4 x i64> %2
>  }
>
>  define <32 x i8> @stack_fold_psubsb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubsb
> -  ;CHECK:       vpsubsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubsb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubsb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
>    ret <32 x i8> %2
> @@ -1115,8 +1884,14 @@ define <32 x i8> @stack_fold_psubsb(<32
>  declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
>
>  define <16 x i16> @stack_fold_psubsw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubsw
> -  ;CHECK:       vpsubsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubsw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubsw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -1124,8 +1899,14 @@ define <16 x i16> @stack_fold_psubsw(<16
>  declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <32 x i8> @stack_fold_psubusb(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubusb
> -  ;CHECK:       vpsubusb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubusb:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubusb {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %a0, <32 x i8> %a1)
>    ret <32 x i8> %2
> @@ -1133,8 +1914,14 @@ define <32 x i8> @stack_fold_psubusb(<32
>  declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
>
>  define <16 x i16> @stack_fold_psubusw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubusw
> -  ;CHECK:       vpsubusw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubusw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubusw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %a0, <16 x i16> %a1)
>    ret <16 x i16> %2
> @@ -1142,24 +1929,45 @@ define <16 x i16> @stack_fold_psubusw(<1
>  declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
>
>  define <16 x i16> @stack_fold_psubw(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_psubw
> -  ;CHECK:       vpsubw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_psubw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpsubw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = sub <16 x i16> %a0, %a1
>    ret <16 x i16> %2
>  }
>
>  define <32 x i8> @stack_fold_punpckhbw(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckhbw
> -  ;CHECK:       vpunpckhbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckhbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckhbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
>    ret <32 x i8> %2
>  }
>
>  define <8 x i32> @stack_fold_punpckhdq(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckhdq
> -  ;CHECK:       vpunpckhdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckhdq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7]
> +; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
>    ; add forces execution domain
> @@ -1168,8 +1976,16 @@ define <8 x i32> @stack_fold_punpckhdq(<
>  }
>
>  define <4 x i64> @stack_fold_punpckhqdq(<4 x i64> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckhqdq
> -  ;CHECK:       vpunpckhqdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckhqdq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
> +; CHECK-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
>    ; add forces execution domain
> @@ -1178,24 +1994,46 @@ define <4 x i64> @stack_fold_punpckhqdq(
>  }
>
>  define <16 x i16> @stack_fold_punpckhwd(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckhwd
> -  ;CHECK:       vpunpckhwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckhwd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
>    ret <16 x i16> %2
>  }
>
>  define <32 x i8> @stack_fold_punpcklbw(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpcklbw
> -  ;CHECK:       vpunpcklbw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpcklbw:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
>    ret <32 x i8> %2
>  }
>
>  define <8 x i32> @stack_fold_punpckldq(<8 x i32> %a0, <8 x i32> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpckldq
> -  ;CHECK:       vpunpckldq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpckldq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
> +; CHECK-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
>    ; add forces execution domain
> @@ -1204,8 +2042,16 @@ define <8 x i32> @stack_fold_punpckldq(<
>  }
>
>  define <4 x i64> @stack_fold_punpcklqdq(<4 x i64> %a0, <4 x i64> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpcklqdq
> -  ;CHECK:       vpunpcklqdq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpcklqdq:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
> +; CHECK-NEXT:    vpaddq {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
>    ; add forces execution domain
> @@ -1214,16 +2060,30 @@ define <4 x i64> @stack_fold_punpcklqdq(
>  }
>
>  define <16 x i16> @stack_fold_punpcklwd(<16 x i16> %a0, <16 x i16> %a1) {
> -  ;CHECK-LABEL: stack_fold_punpcklwd
> -  ;CHECK:       vpunpcklwd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_punpcklwd:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11]
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
>    ret <16 x i16> %2
>  }
>
>  define <32 x i8> @stack_fold_pxor(<32 x i8> %a0, <32 x i8> %a1) {
> -  ;CHECK-LABEL: stack_fold_pxor
> -  ;CHECK:       vpxor {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
> +; CHECK-LABEL: stack_fold_pxor:
> +; CHECK:       # %bb.0:
> +; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
> +; CHECK-NEXT:    #APP
> +; CHECK-NEXT:    nop
> +; CHECK-NEXT:    #NO_APP
> +; CHECK-NEXT:    vpxor {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
> +; CHECK-NEXT:    vpaddb {{.*}}(%rip), %ymm0, %ymm0
> +; CHECK-NEXT:    retq
>    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>    %2 = xor <32 x i8> %a0, %a1
>    ; add forces execution domain
>
>
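A quick note on the pattern, since it repeats through the whole diff: the inline
asm "nop" clobbers xmm2-xmm15 and the flags, so the second operand can only live
across it in a stack slot, and the instruction under test is then expected to
fold its reload straight from that slot (the "Folded Reload" comment). The
regenerated checks also match the base register as %r{{[sb]}}p instead of
hard-coding %rsp, so they hold whether the spill is addressed off rsp or rbp.
Below is a minimal sketch of the shape, using a made-up function name and a
made-up add constant; it is an illustration of the test idiom, not a copy of
any test in the diff:

  ; Build with: llc -mtriple=x86_64-unknown-unknown -mattr=+avx2 < sketch.ll
  define <32 x i8> @stack_fold_por_sketch(<32 x i8> %a0, <32 x i8> %a1) {
    ; Clobbering xmm2-xmm15 leaves no register for %a1, forcing a 32-byte
    ; spill of ymm1 across the asm block.
    %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
    ; The reload of %a1 should fold into the vpor memory operand.
    %2 = or <32 x i8> %a0, %a1
    ; The trailing add pins the result to the integer execution domain (the
    ; "add forces execution domain" comments above), so llc prints vpor
    ; rather than the float-domain vorps.
    %3 = add <32 x i8> %2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
    ret <32 x i8> %3
  }

The CHECK lines themselves are produced by rerunning
llvm/utils/update_llc_test_checks.py on the test file, which is where all of
the churn in this diff comes from.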