[llvm] r369909 - [X86] Automatically generate various tests. NFC

Amaury Sechet via llvm-commits <llvm-commits@lists.llvm.org>
Mon Aug 26 06:53:30 PDT 2019


Author: deadalnix
Date: Mon Aug 26 06:53:29 2019
New Revision: 369909

URL: http://llvm.org/viewvc/llvm-project?rev=369909&view=rev
Log:
[X86] Automatically generate various tests. NFC

Modified:
    llvm/trunk/test/CodeGen/X86/absolute-constant.ll
    llvm/trunk/test/CodeGen/X86/avx-bitcast.ll
    llvm/trunk/test/CodeGen/X86/avx-isa-check.ll
    llvm/trunk/test/CodeGen/X86/avx-minmax.ll
    llvm/trunk/test/CodeGen/X86/avx-vpclmulqdq.ll
    llvm/trunk/test/CodeGen/X86/bit-test-shift.ll
    llvm/trunk/test/CodeGen/X86/combine-fabs.ll
    llvm/trunk/test/CodeGen/X86/combine-lds.ll
    llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
    llvm/trunk/test/CodeGen/X86/extract-combine.ll
    llvm/trunk/test/CodeGen/X86/extract-extract.ll
    llvm/trunk/test/CodeGen/X86/h-registers-2.ll
    llvm/trunk/test/CodeGen/X86/insertelement-copytoregs.ll
    llvm/trunk/test/CodeGen/X86/insertelement-legalize.ll
    llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll
    llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll
    llvm/trunk/test/CodeGen/X86/memset-3.ll
    llvm/trunk/test/CodeGen/X86/memset-sse-stack-realignment.ll
    llvm/trunk/test/CodeGen/X86/pr28472.ll
    llvm/trunk/test/CodeGen/X86/saddo-redundant-add.ll
    llvm/trunk/test/CodeGen/X86/shl_elim.ll
    llvm/trunk/test/CodeGen/X86/shuffle-combine-crash.ll
    llvm/trunk/test/CodeGen/X86/sqrt.ll
    llvm/trunk/test/CodeGen/X86/store-narrow.ll
    llvm/trunk/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll

Modified: llvm/trunk/test/CodeGen/X86/absolute-constant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/absolute-constant.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/absolute-constant.ll (original)
+++ llvm/trunk/test/CodeGen/X86/absolute-constant.ll Mon Aug 26 06:53:29 2019
@@ -1,5 +1,6 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s | FileCheck %s
-; RUN: llc -relocation-model=pic < %s | FileCheck %s
+; RUN: llc -relocation-model=pic < %s | FileCheck %s --check-prefix=PIC
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -7,10 +8,28 @@ target triple = "x86_64-unknown-linux-gn
 @foo = external global i8, align 1, !absolute_symbol !0
 
 define void @bar(i8* %x) {
+; CHECK-LABEL: bar:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    testb $foo, (%rdi)
+; CHECK-NEXT:    je .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %if.then
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    jmp xf # TAILCALL
+; CHECK-NEXT:  .LBB0_1: # %if.end
+; CHECK-NEXT:    retq
+;
+; PIC-LABEL: bar:
+; PIC:       # %bb.0: # %entry
+; PIC-NEXT:    testb $foo, (%rdi)
+; PIC-NEXT:    je .LBB0_1
+; PIC-NEXT:  # %bb.2: # %if.then
+; PIC-NEXT:    xorl %eax, %eax
+; PIC-NEXT:    jmp xf@PLT # TAILCALL
+; PIC-NEXT:  .LBB0_1: # %if.end
+; PIC-NEXT:    retq
 entry:
   %0 = load i8, i8* %x, align 1
   %conv = sext i8 %0 to i32
-  ; CHECK: testb $foo, (%rdi)
   %and = and i32 %conv, sext (i8 ptrtoint (i8* @foo to i8) to i32)
   %tobool = icmp eq i32 %and, 0
   br i1 %tobool, label %if.end, label %if.then

Modified: llvm/trunk/test/CodeGen/X86/avx-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-bitcast.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-bitcast.ll Mon Aug 26 06:53:29 2019
@@ -1,9 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -O0 -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
 
 define i64 @bitcasti64tof64() {
 ; CHECK-LABEL: bitcasti64tof64:
 ; CHECK:       # %bb.0:
-; CHECK:         vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    # implicit-def: $rax
+; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    vmovq %xmm0, %rax
 ; CHECK-NEXT:    retq
   %a = load double, double* undef

Modified: llvm/trunk/test/CodeGen/X86/avx-isa-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-isa-check.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-isa-check.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-isa-check.ll Mon Aug 26 06:53:29 2019
@@ -352,7 +352,7 @@ define <8 x float> @shuffle_v8f32_113355
 }
 
 define <4 x float> @shuffle_v4f32_1133(<4 x float> %a, <4 x float> %b) {
-; vmovshdup 128 test 
+; vmovshdup 128 test
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
   ret <4 x float> %shuffle
 }

Modified: llvm/trunk/test/CodeGen/X86/avx-minmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-minmax.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-minmax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-minmax.ll Mon Aug 26 06:53:29 2019
@@ -1,64 +1,81 @@
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -asm-verbose=false -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck -check-prefix=UNSAFE %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s
 
-; UNSAFE-LABEL: maxpd:
-; UNSAFE: vmaxpd {{.+}}, %xmm
 define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: maxpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
   %max_is_x = fcmp oge <2 x double> %x, %y
   %max = select <2 x i1> %max_is_x, <2 x double> %x, <2 x double> %y
   ret <2 x double> %max
 }
 
-; UNSAFE-LABEL: minpd:
-; UNSAFE: vminpd {{.+}}, %xmm
 define <2 x double> @minpd(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: minpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vminpd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
   %min_is_x = fcmp ole <2 x double> %x, %y
   %min = select <2 x i1> %min_is_x, <2 x double> %x, <2 x double> %y
   ret <2 x double> %min
 }
 
-; UNSAFE-LABEL: maxps:
-; UNSAFE: vmaxps {{.+}}, %xmm
 define <4 x float> @maxps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: maxps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
   %max_is_x = fcmp oge <4 x float> %x, %y
   %max = select <4 x i1> %max_is_x, <4 x float> %x, <4 x float> %y
   ret <4 x float> %max
 }
 
-; UNSAFE-LABEL: minps:
-; UNSAFE: vminps {{.+}}, %xmm
 define <4 x float> @minps(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: minps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vminps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
   %min_is_x = fcmp ole <4 x float> %x, %y
   %min = select <4 x i1> %min_is_x, <4 x float> %x, <4 x float> %y
   ret <4 x float> %min
 }
 
-; UNSAFE-LABEL: vmaxpd:
-; UNSAFE: vmaxpd {{.+}}, %ymm
 define <4 x double> @vmaxpd(<4 x double> %x, <4 x double> %y) {
+; CHECK-LABEL: vmaxpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %max_is_x = fcmp oge <4 x double> %x, %y
   %max = select <4 x i1> %max_is_x, <4 x double> %x, <4 x double> %y
   ret <4 x double> %max
 }
 
-; UNSAFE-LABEL: vminpd:
-; UNSAFE: vminpd {{.+}}, %ymm
 define <4 x double> @vminpd(<4 x double> %x, <4 x double> %y) {
+; CHECK-LABEL: vminpd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vminpd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %min_is_x = fcmp ole <4 x double> %x, %y
   %min = select <4 x i1> %min_is_x, <4 x double> %x, <4 x double> %y
   ret <4 x double> %min
 }
 
-; UNSAFE-LABEL: vmaxps:
-; UNSAFE: vmaxps {{.+}}, %ymm
 define <8 x float> @vmaxps(<8 x float> %x, <8 x float> %y) {
+; CHECK-LABEL: vmaxps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %max_is_x = fcmp oge <8 x float> %x, %y
   %max = select <8 x i1> %max_is_x, <8 x float> %x, <8 x float> %y
   ret <8 x float> %max
 }
 
-; UNSAFE-LABEL: vminps:
-; UNSAFE: vminps {{.+}}, %ymm
 define <8 x float> @vminps(<8 x float> %x, <8 x float> %y) {
+; CHECK-LABEL: vminps:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vminps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
   %min_is_x = fcmp ole <8 x float> %x, %y
   %min = select <8 x i1> %min_is_x, <8 x float> %x, <8 x float> %y
   ret <8 x float> %min

Modified: llvm/trunk/test/CodeGen/X86/avx-vpclmulqdq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vpclmulqdq.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vpclmulqdq.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vpclmulqdq.ll Mon Aug 26 06:53:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx,vpclmulqdq -show-mc-encoding | FileCheck %s --check-prefix=AVX_VPCLMULQDQ
 
 ; Check for vpclmulqdq

Modified: llvm/trunk/test/CodeGen/X86/bit-test-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bit-test-shift.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bit-test-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/bit-test-shift.ll Mon Aug 26 06:53:29 2019
@@ -1,11 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-- | FileCheck %s
 ; <rdar://problem/8285015>
 
 define i32 @x(i32 %t) nounwind readnone ssp {
+; CHECK-LABEL: x:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    shll $23, %eax
+; CHECK-NEXT:    sarl $31, %eax
+; CHECK-NEXT:    andl $-26, %eax
+; CHECK-NEXT:    retl
 entry:
-; CHECK: shll	$23, %eax
-; CHECK: sarl	$31, %eax
-; CHECK: andl	$-26, %eax
   %and = and i32 %t, 256
   %tobool = icmp eq i32 %and, 0
   %retval.0 = select i1 %tobool, i32 0, i32 -26

Modified: llvm/trunk/test/CodeGen/X86/combine-fabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-fabs.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-fabs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-fabs.ll Mon Aug 26 06:53:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
 
@@ -44,7 +45,7 @@ define float @combine_fabs_fabs(float %a
 ;
 ; AVX-LABEL: combine_fabs_fabs:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call float @llvm.fabs.f32(float %a)
@@ -60,7 +61,7 @@ define <4 x float> @combine_vec_fabs_fab
 ;
 ; AVX-LABEL: combine_vec_fabs_fabs:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
@@ -77,7 +78,7 @@ define float @combine_fabs_fneg(float %a
 ;
 ; AVX-LABEL: combine_fabs_fneg:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub float -0.0, %a
@@ -93,7 +94,7 @@ define <4 x float> @combine_vec_fabs_fne
 ;
 ; AVX-LABEL: combine_vec_fabs_fneg:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %a
@@ -110,7 +111,7 @@ define float @combine_fabs_fcopysign(flo
 ;
 ; AVX-LABEL: combine_fabs_fcopysign:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call float @llvm.copysign.f32(float %a, float %b)
@@ -126,7 +127,7 @@ define <4 x float> @combine_vec_fabs_fco
 ;
 ; AVX-LABEL: combine_vec_fabs_fcopysign:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b)

Modified: llvm/trunk/test/CodeGen/X86/combine-lds.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-lds.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-lds.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-lds.ll Mon Aug 26 06:53:29 2019
@@ -1,6 +1,11 @@
-; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | grep fldl | count 1
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s
 
 define double @doload64(i64 %x) nounwind  {
+; CHECK-LABEL: doload64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
+; CHECK-NEXT:    retl
 	%tmp717 = bitcast i64 %x to double
 	ret double %tmp717
 }

Modified: llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll (original)
+++ llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll Mon Aug 26 06:53:29 2019
@@ -1,10 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=i686-- < %s | FileCheck %s
 
-; CHECK-LABEL: @bar
-; CHECK-DAG: movl $1074339512,
-; CHECK-DAG: movl $1374389535,
-; CHECK-DAG: movl $1078523331,
 define void @bar() unnamed_addr {
+; CHECK-LABEL: bar:
+; CHECK:       # %bb.0: # %entry-block
+; CHECK-NEXT:    pushl %ebp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    .cfi_offset %ebp, -8
+; CHECK-NEXT:    movl %esp, %ebp
+; CHECK-NEXT:    .cfi_def_cfa_register %ebp
+; CHECK-NEXT:    andl $-8, %esp
+; CHECK-NEXT:    subl $16, %esp
+; CHECK-NEXT:    movl $1074339512, {{[0-9]+}}(%esp) # imm = 0x40091EB8
+; CHECK-NEXT:    movl $1374389535, (%esp) # imm = 0x51EB851F
+; CHECK-NEXT:    movl $1078523331, {{[0-9]+}}(%esp) # imm = 0x4048F5C3
+; CHECK-NEXT:    movl %ebp, %esp
+; CHECK-NEXT:    popl %ebp
+; CHECK-NEXT:    .cfi_def_cfa %esp, 4
+; CHECK-NEXT:    retl
 entry-block:
   %a = alloca double
   %b = alloca float

Modified: llvm/trunk/test/CodeGen/X86/extract-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extract-combine.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extract-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extract-combine.ll Mon Aug 26 06:53:29 2019
@@ -1,7 +1,11 @@
-; RUN: llc < %s -mtriple=x86_64-- -mcpu=core2 -o %t
-; RUN: not grep unpcklps %t
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=core2 | FileCheck %s --implicit-check-not unpcklps
 
 define i32 @foo() nounwind {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xorps %xmm0, %xmm0
+; CHECK-NEXT:    movaps %xmm0, 0
 entry:
 	%tmp74.i25762 = shufflevector <16 x float> zeroinitializer, <16 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>		; <<16 x float>> [#uses=1]
 	%tmp518 = shufflevector <16 x float> %tmp74.i25762, <16 x float> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15>		; <<4 x float>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/extract-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/extract-extract.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/extract-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/extract-extract.ll Mon Aug 26 06:53:29 2019
@@ -1,4 +1,5 @@
-; RUN: llc < %s -mtriple=i686-- >/dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-- | FileCheck %s
 ; PR4699
 
 ; Handle this extractvalue-of-extractvalue case without getting in
@@ -10,6 +11,9 @@
         %pp = type { %cc }
 
 define fastcc void @foo(%pp* nocapture byval %p_arg) {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    retl
 entry:
         %tmp2 = getelementptr %pp, %pp* %p_arg, i64 0, i32 0         ; <%cc*> [#uses=
         %tmp3 = load %cc, %cc* %tmp2         ; <%cc> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/h-registers-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/h-registers-2.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/h-registers-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/h-registers-2.ll Mon Aug 26 06:53:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-- | FileCheck %s
 
 ; Use an h register, but don't omit the explicit shift for
@@ -5,11 +6,13 @@
 
 define i32 @foo(i8* %x, i32 %y) nounwind {
 ; CHECK-LABEL: foo:
-; CHECK-NOT: ret
-; CHECK: movzbl %{{[abcd]h}},
-; CHECK-NOT: ret
-; CHECK: shll $3,
-; CHECK: ret
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movzbl %ah, %eax
+; CHECK-NEXT:    movb $77, (%ecx,%eax,8)
+; CHECK-NEXT:    shll $3, %eax
+; CHECK-NEXT:    retl
 
 	%t0 = lshr i32 %y, 8		; <i32> [#uses=1]
 	%t1 = and i32 %t0, 255		; <i32> [#uses=2]

Modified: llvm/trunk/test/CodeGen/X86/insertelement-copytoregs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertelement-copytoregs.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertelement-copytoregs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertelement-copytoregs.ll Mon Aug 26 06:53:29 2019
@@ -1,7 +1,12 @@
-; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
-; CHECK-NOT: IMPLICIT_DEF
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --implicit-check-not IMPLICIT_DEF
 
 define void @foo(<2 x float>* %p) {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorps %xmm0, %xmm0
+; CHECK-NEXT:    movlps %xmm0, (%rdi)
+; CHECK-NEXT:    retq
   %t = insertelement <2 x float> undef, float 0.0, i32 0
   %v = insertelement <2 x float> %t,   float 0.0, i32 1
   br label %bb8

Modified: llvm/trunk/test/CodeGen/X86/insertelement-legalize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/insertelement-legalize.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/insertelement-legalize.ll (original)
+++ llvm/trunk/test/CodeGen/X86/insertelement-legalize.ll Mon Aug 26 06:53:29 2019
@@ -1,7 +1,28 @@
-; RUN: llc < %s -mtriple=i686--
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-- | FileCheck %s
 
 ; Test to check that we properly legalize an insert vector element
 define void @test(<2 x i64> %val, <2 x i64>* %dst, i64 %x) nounwind {
+; CHECK-LABEL: test:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushl %edi
+; CHECK-NEXT:    pushl %esi
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT:    adcl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT:    addl %ecx, %ecx
+; CHECK-NEXT:    adcl %edx, %edx
+; CHECK-NEXT:    movl %ecx, 8(%eax)
+; CHECK-NEXT:    movl %esi, (%eax)
+; CHECK-NEXT:    movl %edx, 12(%eax)
+; CHECK-NEXT:    movl %edi, 4(%eax)
+; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    popl %edi
+; CHECK-NEXT:    retl
 entry:
 	%tmp4 = insertelement <2 x i64> %val, i64 %x, i32 0		; <<2 x i64>> [#uses=1]
 	%add = add <2 x i64> %tmp4, %val		; <<2 x i64>> [#uses=1]

Modified: llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll Mon Aug 26 06:53:29 2019
@@ -1,14 +1,28 @@
-; RUN: llc < %s -mcpu=generic -mtriple=x86_64-- | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-- | FileCheck %s --implicit-check-not '{{and|movz|sar|shl}}'
 
 ; Optimize away zext-inreg and sext-inreg on the loop induction
 ; variable using trip-count information.
 
-; CHECK-LABEL: count_up
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: addq $8
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: jne
 define void @count_up(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: count_up:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq $-80, %rax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB0_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, 80(%rdi,%rax)
+; CHECK-NEXT:    addq $8, %rax
+; CHECK-NEXT:    jne .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -36,12 +50,25 @@ return:
 	ret void
 }
 
-; CHECK-LABEL: count_down
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: addq $-8
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: jne
 define void @count_down(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: count_down:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl $80, %eax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB1_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax)
+; CHECK-NEXT:    addq $-8, %rax
+; CHECK-NEXT:    jne .LBB1_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -69,12 +96,25 @@ return:
 	ret void
 }
 
-; CHECK-LABEL: count_up_signed
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: addq $8
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: jne
 define void @count_up_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: count_up_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq $-80, %rax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB2_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, 80(%rdi,%rax)
+; CHECK-NEXT:    addq $8, %rax
+; CHECK-NEXT:    jne .LBB2_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -104,12 +144,25 @@ return:
 	ret void
 }
 
-; CHECK-LABEL: count_down_signed
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: addq $-8
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: jne
 define void @count_down_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: count_down_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl $80, %eax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB3_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax)
+; CHECK-NEXT:    addq $-8, %rax
+; CHECK-NEXT:    jne .LBB3_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -139,12 +192,29 @@ return:
 	ret void
 }
 
-; CHECK-LABEL: another_count_up
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: addq $8
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: jne
 define void @another_count_up(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: another_count_up:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq $-8, %rax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB4_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, 2048(%rdi,%rax)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, 134217728(%rdi,%rax)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax)
+; CHECK-NEXT:    addq $8, %rax
+; CHECK-NEXT:    jne .LBB4_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -172,12 +242,33 @@ return:
 	ret void
 }
 
-; CHECK-LABEL: another_count_down
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: addq $-8
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: jne
 define void @another_count_down(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: another_count_down:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq $-2040, %rax # imm = 0xF808
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    movq %rdi, %rcx
+; CHECK-NEXT:    movq %rdi, %rdx
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB5_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, 2040(%rdi,%rax)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    divsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rcx)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdx)
+; CHECK-NEXT:    addq $-8, %rdx
+; CHECK-NEXT:    addq $134217720, %rcx # imm = 0x7FFFFF8
+; CHECK-NEXT:    addq $2040, %rax # imm = 0x7F8
+; CHECK-NEXT:    jne .LBB5_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -205,12 +296,25 @@ return:
 	ret void
 }
 
-; CHECK-LABEL: another_count_up_signed
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: addq $8
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: jne
 define void @another_count_up_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: another_count_up_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq $-8, %rax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB6_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    divsd %xmm1, %xmm3
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax)
+; CHECK-NEXT:    addq $8, %rax
+; CHECK-NEXT:    jne .LBB6_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -240,12 +344,25 @@ return:
 	ret void
 }
 
-; CHECK-LABEL: another_count_down_signed
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: addq $-8
-; CHECK-NOT: {{and|movz|sar|shl}}
-; CHECK: jne
 define void @another_count_down_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: another_count_down_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl $8, %eax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB7_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    divsd %xmm1, %xmm3
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, -8(%rdi,%rax)
+; CHECK-NEXT:    addq $-8, %rax
+; CHECK-NEXT:    jne .LBB7_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 

Modified: llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll Mon Aug 26 06:53:29 2019
@@ -1,12 +1,35 @@
-; RUN: llc < %s -mtriple=x86_64-- > %t
-; RUN: grep and %t | count 6
-; RUN: grep movzb %t | count 6
-; RUN: grep sar %t | count 12
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
 
 ; Don't optimize away zext-inreg and sext-inreg on the loop induction
 ; variable, because it isn't safe to do so in these cases.
 
 define void @count_up(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: count_up:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl $10, %eax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB0_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movzbl %al, %ecx
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movl %eax, %ecx
+; CHECK-NEXT:    andl $16777215, %ecx # imm = 0xFFFFFF
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    incq %rax
+; CHECK-NEXT:    jne .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -35,6 +58,32 @@ return:
 }
 
 define void @count_down(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: count_down:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl $10, %eax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB1_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movzbl %al, %ecx
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movl %eax, %ecx
+; CHECK-NEXT:    andl $16777215, %ecx # imm = 0xFFFFFF
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    decq %rax
+; CHECK-NEXT:    cmpq $20, %rax
+; CHECK-NEXT:    jne .LBB1_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -63,6 +112,36 @@ return:
 }
 
 define void @count_up_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: count_up_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl $10, %eax
+; CHECK-NEXT:    movl $167772160, %ecx # imm = 0xA000000
+; CHECK-NEXT:    movl $2560, %edx # imm = 0xA00
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB2_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movq %rdx, %rsi
+; CHECK-NEXT:    sarq $8, %rsi
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rsi,8)
+; CHECK-NEXT:    movq %rcx, %rsi
+; CHECK-NEXT:    sarq $24, %rsi
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rsi,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    addq $16777216, %rcx # imm = 0x1000000
+; CHECK-NEXT:    addq $256, %rdx # imm = 0x100
+; CHECK-NEXT:    incq %rax
+; CHECK-NEXT:    jne .LBB2_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -93,6 +172,36 @@ return:
 }
 
 define void @count_down_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: count_down_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq $-10, %rax
+; CHECK-NEXT:    movl $167772160, %ecx # imm = 0xA000000
+; CHECK-NEXT:    movl $2560, %edx # imm = 0xA00
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB3_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movq %rdx, %rsi
+; CHECK-NEXT:    sarq $8, %rsi
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rsi,8)
+; CHECK-NEXT:    movq %rcx, %rsi
+; CHECK-NEXT:    sarq $24, %rsi
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rsi,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, 160(%rdi,%rax,8)
+; CHECK-NEXT:    addq $-16777216, %rcx # imm = 0xFF000000
+; CHECK-NEXT:    addq $-256, %rdx
+; CHECK-NEXT:    decq %rax
+; CHECK-NEXT:    jne .LBB3_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -123,6 +232,32 @@ return:
 }
 
 define void @another_count_up(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: another_count_up:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB4_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movzbl %al, %ecx
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movl %eax, %ecx
+; CHECK-NEXT:    andl $16777215, %ecx # imm = 0xFFFFFF
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    incq %rax
+; CHECK-NEXT:    cmpq %rax, %rsi
+; CHECK-NEXT:    jne .LBB4_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
         br label %loop
 
@@ -151,6 +286,31 @@ return:
 }
 
 define void @another_count_down(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: another_count_down:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB5_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movzbl %sil, %eax
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    andl $16777215, %eax # imm = 0xFFFFFF
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rsi,8)
+; CHECK-NEXT:    decq %rsi
+; CHECK-NEXT:    cmpq $10, %rsi
+; CHECK-NEXT:    jne .LBB5_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
         br label %loop
 
@@ -179,6 +339,37 @@ return:
 }
 
 define void @another_count_up_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: another_count_up_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xorl %r8d, %r8d
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    xorl %ecx, %ecx
+; CHECK-NEXT:    movq %rdi, %rdx
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB6_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movq %r8, %rax
+; CHECK-NEXT:    sarq $8, %rax
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    movq %rcx, %rax
+; CHECK-NEXT:    sarq $24, %rax
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdx)
+; CHECK-NEXT:    addq $8, %rdx
+; CHECK-NEXT:    addq $16777216, %rcx # imm = 0x1000000
+; CHECK-NEXT:    addq $256, %r8 # imm = 0x100
+; CHECK-NEXT:    decq %rsi
+; CHECK-NEXT:    jne .LBB6_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
         br label %loop
 
@@ -209,6 +400,37 @@ return:
 }
 
 define void @another_count_down_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: another_count_down_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    shlq $24, %rax
+; CHECK-NEXT:    leaq -10(%rsi), %rcx
+; CHECK-NEXT:    shlq $8, %rsi
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB7_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movq %rsi, %rdx
+; CHECK-NEXT:    sarq $8, %rdx
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rdx,8)
+; CHECK-NEXT:    movq %rax, %rdx
+; CHECK-NEXT:    sarq $24, %rdx
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rdx,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, 80(%rdi,%rcx,8)
+; CHECK-NEXT:    addq $-16777216, %rax # imm = 0xFF000000
+; CHECK-NEXT:    addq $-256, %rsi
+; CHECK-NEXT:    decq %rcx
+; CHECK-NEXT:    jne .LBB7_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
         br label %loop
 
@@ -239,6 +461,32 @@ return:
 }
 
 define void @yet_another_count_down(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: yet_another_count_down:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq $-2040, %rax # imm = 0xF808
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    movq %rdi, %rcx
+; CHECK-NEXT:    movq %rdi, %rdx
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB8_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, 2040(%rdi,%rax)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rcx)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdx)
+; CHECK-NEXT:    addq $-8, %rdx
+; CHECK-NEXT:    addq $134217720, %rcx # imm = 0x7FFFFF8
+; CHECK-NEXT:    addq $2040, %rax # imm = 0x7F8
+; CHECK-NEXT:    jne .LBB8_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
 	br label %loop
 
@@ -267,6 +515,32 @@ return:
 }
 
 define void @yet_another_count_up(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: yet_another_count_up:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB9_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movzbl %al, %ecx
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movl %eax, %ecx
+; CHECK-NEXT:    andl $16777215, %ecx # imm = 0xFFFFFF
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    addq $3, %rax
+; CHECK-NEXT:    cmpq $10, %rax
+; CHECK-NEXT:    jne .LBB9_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
         br label %loop
 
@@ -295,6 +569,31 @@ return:
 }
 
 define void @still_another_count_down(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: still_another_count_down:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl $10, %eax
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB10_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movzbl %al, %ecx
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movl %eax, %ecx
+; CHECK-NEXT:    andl $16777215, %ecx # imm = 0xFFFFFF
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rcx,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    addq $-3, %rax
+; CHECK-NEXT:    jne .LBB10_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
         br label %loop
 
@@ -323,6 +622,36 @@ return:
 }
 
 define void @yet_another_count_up_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: yet_another_count_up_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq $-10, %rax
+; CHECK-NEXT:    xorl %ecx, %ecx
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB11_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movq %rcx, %rsi
+; CHECK-NEXT:    sarq $8, %rsi
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rsi,8)
+; CHECK-NEXT:    movq %rdx, %rsi
+; CHECK-NEXT:    sarq $24, %rsi
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rsi,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, 80(%rdi,%rax,8)
+; CHECK-NEXT:    addq $50331648, %rdx # imm = 0x3000000
+; CHECK-NEXT:    addq $768, %rcx # imm = 0x300
+; CHECK-NEXT:    addq $3, %rax
+; CHECK-NEXT:    jne .LBB11_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
         br label %loop
 
@@ -353,6 +682,36 @@ return:
 }
 
 define void @yet_another_count_down_signed(double* %d, i64 %n) nounwind {
+; CHECK-LABEL: yet_another_count_down_signed:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movl $10, %eax
+; CHECK-NEXT:    movl $167772160, %ecx # imm = 0xA000000
+; CHECK-NEXT:    movl $2560, %edx # imm = 0xA00
+; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB12_1: # %loop
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movq %rdx, %rsi
+; CHECK-NEXT:    sarq $8, %rsi
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm0, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rsi,8)
+; CHECK-NEXT:    movq %rcx, %rsi
+; CHECK-NEXT:    sarq $24, %rsi
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm1, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rsi,8)
+; CHECK-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; CHECK-NEXT:    mulsd %xmm2, %xmm3
+; CHECK-NEXT:    movsd %xmm3, (%rdi,%rax,8)
+; CHECK-NEXT:    addq $-50331648, %rcx # imm = 0xFD000000
+; CHECK-NEXT:    addq $-768, %rdx # imm = 0xFD00
+; CHECK-NEXT:    addq $-3, %rax
+; CHECK-NEXT:    jne .LBB12_1
+; CHECK-NEXT:  # %bb.2: # %return
+; CHECK-NEXT:    retq
 entry:
         br label %loop
 

Modified: llvm/trunk/test/CodeGen/X86/memset-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memset-3.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memset-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memset-3.ll Mon Aug 26 06:53:29 2019
@@ -1,7 +1,12 @@
-; RUN: llc -mtriple=i386-apple-darwin < %s | not grep memset
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i386-apple-darwin < %s | FileCheck %s --implicit-check-not memset
 ; PR6767
 
 define void @t() nounwind ssp {
+; CHECK-LABEL: t:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    subl $512, %esp ## imm = 0x200
+; CHECK-NEXT:    ud2
 entry:
   %buf = alloca [512 x i8], align 1
   %ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i32 0, i32 0

Modified: llvm/trunk/test/CodeGen/X86/memset-sse-stack-realignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memset-sse-stack-realignment.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memset-sse-stack-realignment.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memset-sse-stack-realignment.ll Mon Aug 26 06:53:29 2019
@@ -1,75 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; Make sure that we realign the stack. Mingw32 uses 4 byte stack alignment, we
 ; need 16 bytes for SSE and 32 bytes for AVX.
 
-; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=pentium2 | FileCheck %s -check-prefix=NOSSE
-; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=pentium3 | FileCheck %s -check-prefix=SSE1
-; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=yonah | FileCheck %s -check-prefix=SSE2
-; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=corei7-avx | FileCheck %s -check-prefix=AVX1
-; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=core-avx2 | FileCheck %s -check-prefix=AVX2
+; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=pentium2 | FileCheck %s --check-prefix=NOSSE
+; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=pentium3 | FileCheck %s --check-prefixes=SSE,SSE1
+; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=yonah | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=corei7-avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=i386-pc-mingw32 -mcpu=core-avx2 | FileCheck %s --check-prefixes=AVX,AVX2
 
 define void @test1(i32 %t) nounwind {
+; NOSSE-LABEL: test1:
+; NOSSE:       # %bb.0:
+; NOSSE-NEXT:    pushl %ebp
+; NOSSE-NEXT:    movl %esp, %ebp
+; NOSSE-NEXT:    subl $32, %esp
+; NOSSE-NEXT:    movl 8(%ebp), %eax
+; NOSSE-NEXT:    movl $0, -4(%ebp)
+; NOSSE-NEXT:    movl $0, -8(%ebp)
+; NOSSE-NEXT:    movl $0, -12(%ebp)
+; NOSSE-NEXT:    movl $0, -16(%ebp)
+; NOSSE-NEXT:    movl $0, -20(%ebp)
+; NOSSE-NEXT:    movl $0, -24(%ebp)
+; NOSSE-NEXT:    movl $0, -28(%ebp)
+; NOSSE-NEXT:    movl $0, -32(%ebp)
+; NOSSE-NEXT:    addl $3, %eax
+; NOSSE-NEXT:    andl $-4, %eax
+; NOSSE-NEXT:    calll __alloca
+; NOSSE-NEXT:    movl %esp, %eax
+; NOSSE-NEXT:    pushl %eax
+; NOSSE-NEXT:    calll _dummy
+; NOSSE-NEXT:    movl %ebp, %esp
+; NOSSE-NEXT:    popl %ebp
+; NOSSE-NEXT:    retl
+;
+; SSE-LABEL: test1:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pushl %ebp
+; SSE-NEXT:    movl %esp, %ebp
+; SSE-NEXT:    pushl %esi
+; SSE-NEXT:    andl $-16, %esp
+; SSE-NEXT:    subl $48, %esp
+; SSE-NEXT:    movl %esp, %esi
+; SSE-NEXT:    movl 8(%ebp), %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    movaps %xmm0, 16(%esi)
+; SSE-NEXT:    movaps %xmm0, (%esi)
+; SSE-NEXT:    addl $3, %eax
+; SSE-NEXT:    andl $-4, %eax
+; SSE-NEXT:    calll __alloca
+; SSE-NEXT:    movl %esp, %eax
+; SSE-NEXT:    pushl %eax
+; SSE-NEXT:    calll _dummy
+; SSE-NEXT:    leal -4(%ebp), %esp
+; SSE-NEXT:    popl %esi
+; SSE-NEXT:    popl %ebp
+; SSE-NEXT:    retl
+;
+; AVX-LABEL: test1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushl %ebp
+; AVX-NEXT:    movl %esp, %ebp
+; AVX-NEXT:    pushl %esi
+; AVX-NEXT:    andl $-32, %esp
+; AVX-NEXT:    subl $64, %esp
+; AVX-NEXT:    movl %esp, %esi
+; AVX-NEXT:    movl 8(%ebp), %eax
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %ymm0, (%esi)
+; AVX-NEXT:    addl $3, %eax
+; AVX-NEXT:    andl $-4, %eax
+; AVX-NEXT:    calll __alloca
+; AVX-NEXT:    movl %esp, %eax
+; AVX-NEXT:    pushl %eax
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    calll _dummy
+; AVX-NEXT:    leal -4(%ebp), %esp
+; AVX-NEXT:    popl %esi
+; AVX-NEXT:    popl %ebp
+; AVX-NEXT:    retl
   %tmp1210 = alloca i8, i32 32, align 4
   call void @llvm.memset.p0i8.i64(i8* align 4 %tmp1210, i8 0, i64 32, i1 false)
   %x = alloca i8, i32 %t
   call void @dummy(i8* %x)
   ret void
-
-; NOSSE-LABEL: test1:
-; NOSSE-NOT: and
-; NOSSE: movl $0
-
-; SSE1-LABEL: test1:
-; SSE1: andl $-16
-; SSE1: movl %esp, %esi
-; SSE1: movaps
-
-; SSE2-LABEL: test1:
-; SSE2: andl $-16
-; SSE2: movl %esp, %esi
-; SSE2: movaps
-
-; AVX1-LABEL: test1:
-; AVX1: andl $-32
-; AVX1: movl %esp, %esi
-; AVX1: vmovaps %ymm
-
-; AVX2-LABEL: test1:
-; AVX2: andl $-32
-; AVX2: movl %esp, %esi
-; AVX2: vmovaps %ymm
-
 }
 
 define void @test2(i32 %t) nounwind {
+; NOSSE-LABEL: test2:
+; NOSSE:       # %bb.0:
+; NOSSE-NEXT:    pushl %ebp
+; NOSSE-NEXT:    movl %esp, %ebp
+; NOSSE-NEXT:    subl $16, %esp
+; NOSSE-NEXT:    movl 8(%ebp), %eax
+; NOSSE-NEXT:    movl $0, -4(%ebp)
+; NOSSE-NEXT:    movl $0, -8(%ebp)
+; NOSSE-NEXT:    movl $0, -12(%ebp)
+; NOSSE-NEXT:    movl $0, -16(%ebp)
+; NOSSE-NEXT:    addl $3, %eax
+; NOSSE-NEXT:    andl $-4, %eax
+; NOSSE-NEXT:    calll __alloca
+; NOSSE-NEXT:    movl %esp, %eax
+; NOSSE-NEXT:    pushl %eax
+; NOSSE-NEXT:    calll _dummy
+; NOSSE-NEXT:    movl %ebp, %esp
+; NOSSE-NEXT:    popl %ebp
+; NOSSE-NEXT:    retl
+;
+; SSE-LABEL: test2:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pushl %ebp
+; SSE-NEXT:    movl %esp, %ebp
+; SSE-NEXT:    pushl %esi
+; SSE-NEXT:    andl $-16, %esp
+; SSE-NEXT:    subl $32, %esp
+; SSE-NEXT:    movl %esp, %esi
+; SSE-NEXT:    movl 8(%ebp), %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    movaps %xmm0, (%esi)
+; SSE-NEXT:    addl $3, %eax
+; SSE-NEXT:    andl $-4, %eax
+; SSE-NEXT:    calll __alloca
+; SSE-NEXT:    movl %esp, %eax
+; SSE-NEXT:    pushl %eax
+; SSE-NEXT:    calll _dummy
+; SSE-NEXT:    leal -4(%ebp), %esp
+; SSE-NEXT:    popl %esi
+; SSE-NEXT:    popl %ebp
+; SSE-NEXT:    retl
+;
+; AVX-LABEL: test2:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushl %ebp
+; AVX-NEXT:    movl %esp, %ebp
+; AVX-NEXT:    pushl %esi
+; AVX-NEXT:    andl $-16, %esp
+; AVX-NEXT:    subl $32, %esp
+; AVX-NEXT:    movl %esp, %esi
+; AVX-NEXT:    movl 8(%ebp), %eax
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, (%esi)
+; AVX-NEXT:    addl $3, %eax
+; AVX-NEXT:    andl $-4, %eax
+; AVX-NEXT:    calll __alloca
+; AVX-NEXT:    movl %esp, %eax
+; AVX-NEXT:    pushl %eax
+; AVX-NEXT:    calll _dummy
+; AVX-NEXT:    leal -4(%ebp), %esp
+; AVX-NEXT:    popl %esi
+; AVX-NEXT:    popl %ebp
+; AVX-NEXT:    retl
   %tmp1210 = alloca i8, i32 16, align 4
   call void @llvm.memset.p0i8.i64(i8* align 4 %tmp1210, i8 0, i64 16, i1 false)
   %x = alloca i8, i32 %t
   call void @dummy(i8* %x)
   ret void
-
-; NOSSE-LABEL: test2:
-; NOSSE-NOT: and
-; NOSSE: movl $0
-
-; SSE1-LABEL: test2:
-; SSE1: andl $-16
-; SSE1: movl %esp, %esi
-; SSE1: movaps
-
-; SSE2-LABEL: test2:
-; SSE2: andl $-16
-; SSE2: movl %esp, %esi
-; SSE2: movaps
-
-; AVX1-LABEL: test2:
-; AVX1: andl $-16
-; AVX1: movl %esp, %esi
-; AVX1: vmovaps %xmm
-
-; AVX2-LABEL: test2:
-; AVX2: andl $-16
-; AVX2: movl %esp, %esi
-; AVX2: vmovaps %xmm
 }
 
 declare void @dummy(i8*)

Modified: llvm/trunk/test/CodeGen/X86/pr28472.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pr28472.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pr28472.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pr28472.ll Mon Aug 26 06:53:29 2019
@@ -1,9 +1,10 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
 
-; CHECK-LABEL: {{^}}same_dynamic_index_fp_vector_type:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
 define float @same_dynamic_index_fp_vector_type(float %val, i32 %idx) {
+; CHECK-LABEL: same_dynamic_index_fp_vector_type:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    retq
 bb:
   %tmp0 = insertelement <4 x float> undef, float %val, i32 %idx
   %tmp1 = extractelement <4 x float> %tmp0, i32 %idx

Modified: llvm/trunk/test/CodeGen/X86/saddo-redundant-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/saddo-redundant-add.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/saddo-redundant-add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/saddo-redundant-add.ll Mon Aug 26 06:53:29 2019
@@ -1,12 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
 
 define void @redundant_add(i64 %n) {
 ; Check that we don't create two additions for the sadd.with.overflow.
-; CHECK-LABEL: redundant_add
-; CHECK-NOT:  leaq
-; CHECK-NOT:  addq
-; CHECK:      incq
-; CHECK-NEXT: jno
+; CHECK-LABEL: redundant_add:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  LBB0_1: ## %exit_check
+; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    cmpq %rdi, %rax
+; CHECK-NEXT:    jge LBB0_4
+; CHECK-NEXT:  ## %bb.2: ## %loop
+; CHECK-NEXT:    ## in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    incq %rax
+; CHECK-NEXT:    jno LBB0_1
+; CHECK-NEXT:  ## %bb.3: ## %overflow
+; CHECK-NEXT:    ud2
+; CHECK-NEXT:  LBB0_4: ## %exit
+; CHECK-NEXT:    retq
 entry:
   br label %exit_check
 

Modified: llvm/trunk/test/CodeGen/X86/shl_elim.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shl_elim.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shl_elim.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shl_elim.ll Mon Aug 26 06:53:29 2019
@@ -1,16 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-- | FileCheck %s
 
 define i32 @test1(i64 %a) nounwind {
+; CHECK-LABEL: test1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT:    shrl %eax
+; CHECK-NEXT:    cwtl
+; CHECK-NEXT:    retl
         %tmp29 = lshr i64 %a, 24                ; <i64> [#uses=1]
         %tmp23 = trunc i64 %tmp29 to i32                ; <i32> [#uses=1]
         %tmp410 = lshr i32 %tmp23, 9            ; <i32> [#uses=1]
         %tmp45 = trunc i32 %tmp410 to i16               ; <i16> [#uses=1]
         %tmp456 = sext i16 %tmp45 to i32                ; <i32> [#uses=1]
         ret i32 %tmp456
-
-; CHECK-LABEL: test1:
-; CHECK: movl 8(%esp), %eax
-; CHECK: shrl %eax
-; CHECK: cwtl
 }
 

Modified: llvm/trunk/test/CodeGen/X86/shuffle-combine-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shuffle-combine-crash.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shuffle-combine-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shuffle-combine-crash.ll Mon Aug 26 06:53:29 2019
@@ -1,4 +1,5 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 | FileCheck %s
 
 ; Verify that DAGCombiner does not crash when checking if it is
 ; safe to fold the shuffles in function @sample_test according to rule
@@ -15,6 +16,18 @@
 ; As a consequence, compiling the function below would have caused a crash.
 
 define void @sample_test() {
+; CHECK-LABEL: sample_test:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    testb %al, %al
+; CHECK-NEXT:    jne .LBB0_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; CHECK-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; CHECK-NEXT:    movd %xmm0, (%rax)
+; CHECK-NEXT:  .LBB0_2:
+; CHECK-NEXT:    retq
   br i1 undef, label %5, label %1
 
 ; <label>:1                                       ; preds = %0

Modified: llvm/trunk/test/CodeGen/X86/sqrt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sqrt.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sqrt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sqrt.ll Mon Aug 26 06:53:29 2019
@@ -1,23 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2                             | FileCheck %s --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx,+sse2 -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx                             | FileCheck %s --check-prefix=AVX
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-avx2,+avx -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=AVX
 
 define float @test_sqrt_f32(float %a) {
-; SSE2-LABEL: test_sqrt_f32
-; SSE2:       sqrtss %xmm0, %xmm0
-; AVX-LABEL:  test_sqrt_f32
-; AVX:        vsqrtss %xmm0, %xmm0
+; SSE2-LABEL: test_sqrt_f32:
+; SSE2:       ## %bb.0:
+; SSE2-NEXT:    sqrtss %xmm0, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX-LABEL: test_sqrt_f32:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %res = call float @llvm.sqrt.f32(float %a)
   ret float %res
 }
 declare float @llvm.sqrt.f32(float) nounwind readnone
 
 define double @test_sqrt_f64(double %a) {
-; SSE2-LABEL: test_sqrt_f64
-; SSE2:       sqrtsd %xmm0, %xmm0
-; AVX-LABEL:  test_sqrt_f64
-; AVX:        vsqrtsd %xmm0, %xmm0
+; SSE2-LABEL: test_sqrt_f64:
+; SSE2:       ## %bb.0:
+; SSE2-NEXT:    sqrtsd %xmm0, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX-LABEL: test_sqrt_f64:
+; AVX:       ## %bb.0:
+; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %res = call double @llvm.sqrt.f64(double %a)
   ret double %res
 }

Modified: llvm/trunk/test/CodeGen/X86/store-narrow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/store-narrow.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/store-narrow.ll (original)
+++ llvm/trunk/test/CodeGen/X86/store-narrow.ll Mon Aug 26 06:53:29 2019
@@ -1,10 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; rdar://7860110
-; RUN: llc -mtriple=x86_64-apple-darwin10.2 -asm-verbose=false < %s | FileCheck %s -check-prefix=X64
-; RUN: llc -mtriple=i686-apple-darwin10.2 -asm-verbose=false -fixup-byte-word-insts=1 < %s | FileCheck %s -check-prefix=X32 -check-prefix=X32-BWON
-; RUN: llc -mtriple=i686-apple-darwin10.2 -asm-verbose=false -fixup-byte-word-insts=0 < %s | FileCheck %s -check-prefix=X32 -check-prefix=X32-BWOFF
+; RUN: llc -mtriple=x86_64-apple-darwin10.2 < %s | FileCheck %s -check-prefix=X64
+; RUN: llc -mtriple=i686-apple-darwin10.2 -fixup-byte-word-insts=1 < %s | FileCheck %s -check-prefix=X32 -check-prefix=X32-BWON
+; RUN: llc -mtriple=i686-apple-darwin10.2 -fixup-byte-word-insts=0 < %s | FileCheck %s -check-prefix=X32 -check-prefix=X32-BWOFF
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
 define void @test1(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
+; X64-LABEL: test1:
+; X64:       ## %bb.0: ## %entry
+; X64-NEXT:    movb %sil, (%rdi)
+; X64-NEXT:    retq
+;
+; X32-LABEL: test1:
+; X32:       ## %bb.0: ## %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movb %al, (%ecx)
+; X32-NEXT:    retl
 entry:
   %A = load i32, i32* %a0, align 4
   %B = and i32 %A, -256     ; 0xFFFFFF00
@@ -12,16 +24,20 @@ entry:
   %D = or i32 %C, %B
   store i32 %D, i32* %a0, align 4
   ret void
-
-; X64-LABEL: test1:
-; X64: movb	%sil, (%rdi)
-
-; X32-LABEL: test1:
-; X32: movb	8(%esp), %al
-; X32: movb	%al, (%{{.*}})
 }
 
 define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
+; X64-LABEL: test2:
+; X64:       ## %bb.0: ## %entry
+; X64-NEXT:    movb %sil, 1(%rdi)
+; X64-NEXT:    retq
+;
+; X32-LABEL: test2:
+; X32:       ## %bb.0: ## %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movb %al, 1(%ecx)
+; X32-NEXT:    retl
 entry:
   %A = load i32, i32* %a0, align 4
   %B = and i32 %A, -65281    ; 0xFFFF00FF
@@ -30,15 +46,27 @@ entry:
   %D = or i32 %B, %CS
   store i32 %D, i32* %a0, align 4
   ret void
-; X64-LABEL: test2:
-; X64: movb	%sil, 1(%rdi)
-
-; X32-LABEL: test2:
-; X32: movb	8(%esp), %[[REG:[abcd]]]l
-; X32: movb	%[[REG]]l, 1(%{{.*}})
 }
 
 define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
+; X64-LABEL: test3:
+; X64:       ## %bb.0: ## %entry
+; X64-NEXT:    movw %si, (%rdi)
+; X64-NEXT:    retq
+;
+; X32-BWON-LABEL: test3:
+; X32-BWON:       ## %bb.0: ## %entry
+; X32-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X32-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-BWON-NEXT:    movw %ax, (%ecx)
+; X32-BWON-NEXT:    retl
+;
+; X32-BWOFF-LABEL: test3:
+; X32-BWOFF:       ## %bb.0: ## %entry
+; X32-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-BWOFF-NEXT:    movw %ax, (%ecx)
+; X32-BWOFF-NEXT:    retl
 entry:
   %A = load i32, i32* %a0, align 4
   %B = and i32 %A, -65536    ; 0xFFFF0000
@@ -46,16 +74,27 @@ entry:
   %D = or i32 %B, %C
   store i32 %D, i32* %a0, align 4
   ret void
-; X64-LABEL: test3:
-; X64: movw	%si, (%rdi)
-
-; X32-LABEL: test3:
-; X32-BWON:  movzwl	8(%esp), %eax
-; X32-BWOFF: movw	8(%esp), %ax
-; X32: movw	%ax, (%{{.*}})
 }
 
 define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
+; X64-LABEL: test4:
+; X64:       ## %bb.0: ## %entry
+; X64-NEXT:    movw %si, 2(%rdi)
+; X64-NEXT:    retq
+;
+; X32-BWON-LABEL: test4:
+; X32-BWON:       ## %bb.0: ## %entry
+; X32-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X32-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-BWON-NEXT:    movw %ax, 2(%ecx)
+; X32-BWON-NEXT:    retl
+;
+; X32-BWOFF-LABEL: test4:
+; X32-BWOFF:       ## %bb.0: ## %entry
+; X32-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-BWOFF-NEXT:    movw %ax, 2(%ecx)
+; X32-BWOFF-NEXT:    retl
 entry:
   %A = load i32, i32* %a0, align 4
   %B = and i32 %A, 65535    ; 0x0000FFFF
@@ -64,16 +103,27 @@ entry:
   %D = or i32 %B, %CS
   store i32 %D, i32* %a0, align 4
   ret void
-; X64-LABEL: test4:
-; X64: movw	%si, 2(%rdi)
-
-; X32-LABEL: test4:
-; X32-BWON:  movzwl	8(%esp), %e[[REG:[abcd]]]x
-; X32-BWOFF: movw	8(%esp), %[[REG:[abcd]]]x
-; X32: movw	%[[REG]]x, 2(%{{.*}})
 }
 
 define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
+; X64-LABEL: test5:
+; X64:       ## %bb.0: ## %entry
+; X64-NEXT:    movw %si, 2(%rdi)
+; X64-NEXT:    retq
+;
+; X32-BWON-LABEL: test5:
+; X32-BWON:       ## %bb.0: ## %entry
+; X32-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X32-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-BWON-NEXT:    movw %ax, 2(%ecx)
+; X32-BWON-NEXT:    retl
+;
+; X32-BWOFF-LABEL: test5:
+; X32-BWOFF:       ## %bb.0: ## %entry
+; X32-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-BWOFF-NEXT:    movw %ax, 2(%ecx)
+; X32-BWOFF-NEXT:    retl
 entry:
   %A = load i64, i64* %a0, align 4
   %B = and i64 %A, -4294901761    ; 0xFFFFFFFF0000FFFF
@@ -82,16 +132,20 @@ entry:
   %D = or i64 %B, %CS
   store i64 %D, i64* %a0, align 4
   ret void
-; X64-LABEL: test5:
-; X64: movw	%si, 2(%rdi)
-
-; X32-LABEL: test5:
-; X32-BWON:  movzwl	8(%esp), %e[[REG:[abcd]]]x
-; X32-BWOFF: movw	8(%esp), %[[REG:[abcd]]]x
-; X32: movw	%[[REG]]x, 2(%{{.*}})
 }
 
 define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
+; X64-LABEL: test6:
+; X64:       ## %bb.0: ## %entry
+; X64-NEXT:    movb %sil, 5(%rdi)
+; X64-NEXT:    retq
+;
+; X32-LABEL: test6:
+; X32:       ## %bb.0: ## %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movb %al, 5(%ecx)
+; X32-NEXT:    retl
 entry:
   %A = load i64, i64* %a0, align 4
   %B = and i64 %A, -280375465082881    ; 0xFFFF00FFFFFFFFFF
@@ -100,16 +154,23 @@ entry:
   %D = or i64 %B, %CS
   store i64 %D, i64* %a0, align 4
   ret void
-; X64-LABEL: test6:
-; X64: movb	%sil, 5(%rdi)
-
-
-; X32-LABEL: test6:
-; X32: movb	8(%esp), %[[REG:[abcd]l]]
-; X32: movb	%[[REG]], 5(%{{.*}})
 }
 
 define i32 @test7(i64* nocapture %a0, i8 zeroext %a1, i32* %P2) nounwind {
+; X64-LABEL: test7:
+; X64:       ## %bb.0: ## %entry
+; X64-NEXT:    movl (%rdx), %eax
+; X64-NEXT:    movb %sil, 5(%rdi)
+; X64-NEXT:    retq
+;
+; X32-LABEL: test7:
+; X32:       ## %bb.0: ## %entry
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %eax
+; X32-NEXT:    movb %cl, 5(%edx)
+; X32-NEXT:    retl
 entry:
   %OtherLoad = load i32 , i32 *%P2
   %A = load i64, i64* %a0, align 4
@@ -119,23 +180,22 @@ entry:
   %D = or i64 %B, %CS
   store i64 %D, i64* %a0, align 4
   ret i32 %OtherLoad
-; X64-LABEL: test7:
-; X64: movb	%sil, 5(%rdi)
-
-
-; X32-LABEL: test7:
-; X32: movb	8(%esp), %[[REG:[abcd]l]]
-; X32: movb	%[[REG]], 5(%{{.*}})
 }
 
 ; PR7833
 
 @g_16 = internal global i32 -1
 
-; X64-LABEL: test8:
-; X64-NEXT: orb  $1, _g_16(%rip)
-; X64-NEXT: ret
 define void @test8() nounwind {
+; X64-LABEL: test8:
+; X64:       ## %bb.0:
+; X64-NEXT:    orb $1, {{.*}}(%rip)
+; X64-NEXT:    retq
+;
+; X32-LABEL: test8:
+; X32:       ## %bb.0:
+; X32-NEXT:    orb $1, _g_16
+; X32-NEXT:    retl
   %tmp = load i32, i32* @g_16
   store i32 0, i32* @g_16
   %or = or i32 %tmp, 1
@@ -143,10 +203,16 @@ define void @test8() nounwind {
   ret void
 }
 
-; X64-LABEL: test9:
-; X64-NEXT: orb $1, _g_16(%rip)
-; X64-NEXT: ret
 define void @test9() nounwind {
+; X64-LABEL: test9:
+; X64:       ## %bb.0:
+; X64-NEXT:    orb $1, {{.*}}(%rip)
+; X64-NEXT:    retq
+;
+; X32-LABEL: test9:
+; X32:       ## %bb.0:
+; X32-NEXT:    orb $1, _g_16
+; X32-NEXT:    retl
   %tmp = load i32, i32* @g_16
   %or = or i32 %tmp, 1
   store i32 %or, i32* @g_16
@@ -154,11 +220,20 @@ define void @test9() nounwind {
 }
 
 ; rdar://8494845 + PR8244
-; X64-LABEL: test10:
-; X64-NEXT: movsbl	(%rdi), %eax
-; X64-NEXT: shrl	$8, %eax
-; X64-NEXT: ret
 define i8 @test10(i8* %P) nounwind ssp {
+; X64-LABEL: test10:
+; X64:       ## %bb.0: ## %entry
+; X64-NEXT:    movsbl (%rdi), %eax
+; X64-NEXT:    shrl $8, %eax
+; X64-NEXT:    ## kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: test10:
+; X32:       ## %bb.0: ## %entry
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movsbl (%eax), %eax
+; X32-NEXT:    movb %ah, %al
+; X32-NEXT:    retl
 entry:
   %tmp = load i8, i8* %P, align 1
   %conv = sext i8 %tmp to i32

Modified: llvm/trunk/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll?rev=369909&r1=369908&r2=369909&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll Mon Aug 26 06:53:29 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -mtriple=x86_64-apple-macosx10.9.0  -mcpu=core2 -mattr=+64bit,+sse2 < %s | FileCheck %s
 
 ; DAGCombine may choose to rewrite 2 loads feeding a select as a select of
@@ -7,6 +8,18 @@
 declare void @sink(<2 x double>)
 
 define void @test1(i1 %cmp) align 2 {
+; CHECK-LABEL: test1:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    testb $1, %dil
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    movq %rsp, %rcx
+; CHECK-NEXT:    cmovneq %rax, %rcx
+; CHECK-NEXT:    movups (%rcx), %xmm0
+; CHECK-NEXT:    callq _sink
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    retq
   %1 = alloca  <2 x double>, align 16
   %2 = alloca  <2 x double>, align 8
 
@@ -15,12 +28,21 @@ define void @test1(i1 %cmp) align 2 {
   %val3 = select i1 %cmp, <2 x double> %val, <2 x double> %val2
   call void @sink(<2 x double> %val3)
   ret void
-  ; CHECK: test1
-  ; CHECK: movups
-  ; CHECK: ret
 }
 
 define void @test2(i1 %cmp) align 2 {
+; CHECK-LABEL: test2:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    testb $1, %dil
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    movq %rsp, %rcx
+; CHECK-NEXT:    cmovneq %rax, %rcx
+; CHECK-NEXT:    movaps (%rcx), %xmm0
+; CHECK-NEXT:    callq _sink
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    retq
   %1 = alloca  <2 x double>, align 16
   %2 = alloca  <2 x double>, align 8
 
@@ -29,7 +51,4 @@ define void @test2(i1 %cmp) align 2 {
   %val3 = select i1 %cmp, <2 x double> %val, <2 x double> %val2
   call void @sink(<2 x double> %val3)
   ret void
-  ; CHECK: test2
-  ; CHECK: movaps
-  ; CHECK: ret
 }




More information about the llvm-commits mailing list