[llvm] r305655 - [x86] specify triples and auto-generate complete checks; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Sun Jun 18 14:42:20 PDT 2017


Author: spatel
Date: Sun Jun 18 16:42:19 2017
New Revision: 305655

URL: http://llvm.org/viewvc/llvm-project?rev=305655&view=rev
Log:
[x86] specify triples and auto-generate complete checks; NFC
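
Pinning an explicit -mtriple matters here because -march=x86 alone leaves the
OS part of the triple to the host machine, so complete autogenerated checks
would differ from host to host (ELF emits ".LBB0_2:" labels and "#" comment
markers where Mach-O emits "LBB0_2:" and "##"). With the triple fixed, full
assembly bodies can be regenerated with utils/update_llc_test_checks.py
against a freshly built llc. A minimal sketch of the resulting test shape
(illustrative only; the function and check lines below are hypothetical, not
part of this commit):

; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s
define <4 x i16> @add1(<4 x i16> %x) nounwind {
; CHECK-LABEL: add1:
; CHECK:       # BB#0:
;   ...the script fills in a complete CHECK-NEXT body here...
  %r = add <4 x i16> %x, <i16 1, i16 1, i16 1, i16 1>
  ret <4 x i16> %r
}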

Modified:
    llvm/trunk/test/CodeGen/X86/widen_cast-1.ll
    llvm/trunk/test/CodeGen/X86/widen_cast-2.ll
    llvm/trunk/test/CodeGen/X86/widen_cast-3.ll
    llvm/trunk/test/CodeGen/X86/widen_cast-4.ll

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-1.ll?rev=305655&r1=305654&r2=305655&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-1.ll Sun Jun 18 16:42:19 2017
@@ -1,53 +1,95 @@
-; RUN: llc -march=x86 -mcpu=generic -mattr=+sse4.2 < %s | FileCheck %s
-; RUN: llc -march=x86 -mcpu=atom < %s | FileCheck -check-prefix=ATOM %s
-
-; CHECK: movl
-; CHECK: paddw
-; CHECK: movq
-
-; FIXME - if this test cares about scheduling, why isn't it being checked?
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i686-unknown-unknown -mcpu=generic -mattr=+sse4.2 < %s | FileCheck %s
+; RUN: llc -mtriple=i686-unknown-unknown -mcpu=atom < %s | FileCheck -check-prefix=ATOM %s
 
 ; The Atom scheduler produces a different instruction order
-; ATOM: movl
-; ATOM: paddw
-; ATOM: movq
 
 ; bitcast a v4i16 to v2i32
 
 define void @convert(<2 x i32>* %dst, <4 x i16>* %src) nounwind {
+; CHECK-LABEL: convert:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    movl $0, (%esp)
+; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
+; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; CHECK-NEXT:    cmpl $3, (%esp)
+; CHECK-NEXT:    jg .LBB0_3
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB0_2: # %forbody
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movl (%esp), %eax
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT:    pmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; CHECK-NEXT:    paddw %xmm0, %xmm2
+; CHECK-NEXT:    pshufb %xmm1, %xmm2
+; CHECK-NEXT:    movq %xmm2, (%ecx,%eax,8)
+; CHECK-NEXT:    incl (%esp)
+; CHECK-NEXT:    cmpl $3, (%esp)
+; CHECK-NEXT:    jle .LBB0_2
+; CHECK-NEXT:  .LBB0_3: # %afterfor
+; CHECK-NEXT:    popl %eax
+; CHECK-NEXT:    retl
+;
+; ATOM-LABEL: convert:
+; ATOM:       # BB#0: # %entry
+; ATOM-NEXT:    pushl %eax
+; ATOM-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
+; ATOM-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; ATOM-NEXT:    movl $0, (%esp)
+; ATOM-NEXT:    cmpl $3, (%esp)
+; ATOM-NEXT:    jg .LBB0_3
+; ATOM-NEXT:    .p2align 4, 0x90
+; ATOM-NEXT:  .LBB0_2: # %forbody
+; ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
+; ATOM-NEXT:    movl (%esp), %eax
+; ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; ATOM-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
+; ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; ATOM-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; ATOM-NEXT:    paddw %xmm0, %xmm2
+; ATOM-NEXT:    pshufb %xmm1, %xmm2
+; ATOM-NEXT:    movq %xmm2, (%ecx,%eax,8)
+; ATOM-NEXT:    incl (%esp)
+; ATOM-NEXT:    cmpl $3, (%esp)
+; ATOM-NEXT:    jle .LBB0_2
+; ATOM-NEXT:  .LBB0_3: # %afterfor
+; ATOM-NEXT:    popl %eax
+; ATOM-NEXT:    retl
 entry:
-	%dst.addr = alloca <2 x i32>*		; <<2 x i32>**> [#uses=2]
-	%src.addr = alloca <4 x i16>*		; <<4 x i16>**> [#uses=2]
-	%i = alloca i32, align 4		; <i32*> [#uses=6]
+	%dst.addr = alloca <2 x i32>*
+	%src.addr = alloca <4 x i16>*
+	%i = alloca i32, align 4
 	store <2 x i32>* %dst, <2 x i32>** %dst.addr
 	store <4 x i16>* %src, <4 x i16>** %src.addr
 	store i32 0, i32* %i
 	br label %forcond
 
-forcond:		; preds = %forinc, %entry
-	%tmp = load i32, i32* %i		; <i32> [#uses=1]
-	%cmp = icmp slt i32 %tmp, 4		; <i1> [#uses=1]
+forcond:
+	%tmp = load i32, i32* %i
+	%cmp = icmp slt i32 %tmp, 4
 	br i1 %cmp, label %forbody, label %afterfor
 
-forbody:		; preds = %forcond
-	%tmp1 = load i32, i32* %i		; <i32> [#uses=1]
-	%tmp2 = load <2 x i32>*, <2 x i32>** %dst.addr		; <<2 x i32>*> [#uses=1]
-	%arrayidx = getelementptr <2 x i32>, <2 x i32>* %tmp2, i32 %tmp1		; <<2 x i32>*> [#uses=1]
-	%tmp3 = load i32, i32* %i		; <i32> [#uses=1]
-	%tmp4 = load <4 x i16>*, <4 x i16>** %src.addr		; <<4 x i16>*> [#uses=1]
-	%arrayidx5 = getelementptr <4 x i16>, <4 x i16>* %tmp4, i32 %tmp3		; <<4 x i16>*> [#uses=1]
-	%tmp6 = load <4 x i16>, <4 x i16>* %arrayidx5		; <<4 x i16>> [#uses=1]
-	%add = add <4 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1 >		; <<4 x i16>> [#uses=1]
-	%conv = bitcast <4 x i16> %add to <2 x i32>		; <<2 x i32>> [#uses=1]
+forbody:
+	%tmp1 = load i32, i32* %i
+	%tmp2 = load <2 x i32>*, <2 x i32>** %dst.addr
+	%arrayidx = getelementptr <2 x i32>, <2 x i32>* %tmp2, i32 %tmp1
+	%tmp3 = load i32, i32* %i
+	%tmp4 = load <4 x i16>*, <4 x i16>** %src.addr
+	%arrayidx5 = getelementptr <4 x i16>, <4 x i16>* %tmp4, i32 %tmp3
+	%tmp6 = load <4 x i16>, <4 x i16>* %arrayidx5
+	%add = add <4 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1 >
+	%conv = bitcast <4 x i16> %add to <2 x i32>
 	store <2 x i32> %conv, <2 x i32>* %arrayidx
 	br label %forinc
 
-forinc:		; preds = %forbody
-	%tmp7 = load i32, i32* %i		; <i32> [#uses=1]
-	%inc = add i32 %tmp7, 1		; <i32> [#uses=1]
+forinc:
+	%tmp7 = load i32, i32* %i
+	%inc = add i32 %tmp7, 1
 	store i32 %inc, i32* %i
 	br label %forcond
 
-afterfor:		; preds = %forcond
+afterfor:
 	ret void
 }
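
For reference, the vector operation the loop above exercises, reduced to a
single straight-line function (a hypothetical reduction, not part of this
commit); the pmovzxwd/paddw/pshufb/movq sequence in the CHECK body above is
roughly this add-plus-bitcast after type legalization:

define void @convert_once(<2 x i32>* %dst, <4 x i16>* %src) nounwind {
  %v = load <4 x i16>, <4 x i16>* %src      ; v4i16 is not a legal x86 type
  %add = add <4 x i16> %v, <i16 1, i16 1, i16 1, i16 1>
  %bc = bitcast <4 x i16> %add to <2 x i32> ; reinterpretation, no data movement
  store <2 x i32> %bc, <2 x i32>* %dst
  ret void
}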

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-2.ll?rev=305655&r1=305654&r2=305655&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-2.ll Sun Jun 18 16:42:19 2017
@@ -1,46 +1,70 @@
-; RUN: llc < %s -march=x86 -mcpu=nehalem -mattr=+sse4.2 | FileCheck %s
-; CHECK: pextrd
-; CHECK: pextrd
-; CHECK: movd
-; CHECK: movdqa
-
-
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s
 ; bitcast v14i16 to v7i32
 
 define void @convert(<7 x i32>* %dst, <14 x i16>* %src) nounwind {
+; CHECK-LABEL: convert:
+; CHECK:       # BB#0: # %entry
+; CHECK-NEXT:    pushl %eax
+; CHECK-NEXT:    movl $0, (%esp)
+; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
+; CHECK-NEXT:    movdqa {{.*#+}} xmm1 = <1,1,1,1,1,1,u,u>
+; CHECK-NEXT:    cmpl $3, (%esp)
+; CHECK-NEXT:    jg .LBB0_3
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB0_2: # %forbody
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    movl (%esp), %eax
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT:    shll $5, %eax
+; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT:    movdqa (%edx,%eax), %xmm2
+; CHECK-NEXT:    paddw %xmm0, %xmm2
+; CHECK-NEXT:    movdqa 16(%edx,%eax), %xmm3
+; CHECK-NEXT:    paddw %xmm1, %xmm3
+; CHECK-NEXT:    pextrd $2, %xmm3, 24(%ecx,%eax)
+; CHECK-NEXT:    pextrd $1, %xmm3, 20(%ecx,%eax)
+; CHECK-NEXT:    movd %xmm3, 16(%ecx,%eax)
+; CHECK-NEXT:    movdqa %xmm2, (%ecx,%eax)
+; CHECK-NEXT:    incl (%esp)
+; CHECK-NEXT:    cmpl $3, (%esp)
+; CHECK-NEXT:    jle .LBB0_2
+; CHECK-NEXT:  .LBB0_3: # %afterfor
+; CHECK-NEXT:    popl %eax
+; CHECK-NEXT:    retl
 entry:
-	%dst.addr = alloca <7 x i32>*		; <<7 x i32>**> [#uses=2]
-	%src.addr = alloca <14 x i16>*		; <<14 x i16>**> [#uses=2]
-	%i = alloca i32, align 4		; <i32*> [#uses=6]
+	%dst.addr = alloca <7 x i32>*
+	%src.addr = alloca <14 x i16>*
+	%i = alloca i32, align 4
 	store <7 x i32>* %dst, <7 x i32>** %dst.addr
 	store <14 x i16>* %src, <14 x i16>** %src.addr
 	store i32 0, i32* %i
 	br label %forcond
 
-forcond:		; preds = %forinc, %entry
-	%tmp = load i32, i32* %i		; <i32> [#uses=1]
-	%cmp = icmp slt i32 %tmp, 4		; <i1> [#uses=1]
+forcond:
+	%tmp = load i32, i32* %i
+	%cmp = icmp slt i32 %tmp, 4
 	br i1 %cmp, label %forbody, label %afterfor
 
-forbody:		; preds = %forcond
-	%tmp1 = load i32, i32* %i		; <i32> [#uses=1]
-	%tmp2 = load <7 x i32>*, <7 x i32>** %dst.addr		; <<2 x i32>*> [#uses=1]
-	%arrayidx = getelementptr <7 x i32>, <7 x i32>* %tmp2, i32 %tmp1		; <<7 x i32>*> [#uses=1]
-	%tmp3 = load i32, i32* %i		; <i32> [#uses=1]
-	%tmp4 = load <14 x i16>*, <14 x i16>** %src.addr		; <<4 x i16>*> [#uses=1]
-	%arrayidx5 = getelementptr <14 x i16>, <14 x i16>* %tmp4, i32 %tmp3		; <<4 x i16>*> [#uses=1]
-	%tmp6 = load <14 x i16>, <14 x i16>* %arrayidx5		; <<4 x i16>> [#uses=1]
-	%add = add <14 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 >		; <<4 x i16>> [#uses=1]
-	%conv = bitcast <14 x i16> %add to <7 x i32>		; <<7 x i32>> [#uses=1]
+forbody:
+	%tmp1 = load i32, i32* %i
+	%tmp2 = load <7 x i32>*, <7 x i32>** %dst.addr
+	%arrayidx = getelementptr <7 x i32>, <7 x i32>* %tmp2, i32 %tmp1
+	%tmp3 = load i32, i32* %i
+	%tmp4 = load <14 x i16>*, <14 x i16>** %src.addr
+	%arrayidx5 = getelementptr <14 x i16>, <14 x i16>* %tmp4, i32 %tmp3
+	%tmp6 = load <14 x i16>, <14 x i16>* %arrayidx5
+	%add = add <14 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 >
+	%conv = bitcast <14 x i16> %add to <7 x i32>
 	store <7 x i32> %conv, <7 x i32>* %arrayidx
 	br label %forinc
 
-forinc:		; preds = %forbody
-	%tmp7 = load i32, i32* %i		; <i32> [#uses=1]
-	%inc = add i32 %tmp7, 1		; <i32> [#uses=1]
+forinc:
+	%tmp7 = load i32, i32* %i
+	%inc = add i32 %tmp7, 1
 	store i32 %inc, i32* %i
 	br label %forcond
 
-afterfor:		; preds = %forcond
+afterfor:
 	ret void
 }
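
The interesting part of the checks above is the store: a <7 x i32> value is 28
bytes, so there is no single legal store for it. The first 16 bytes go out as
one movdqa and the 12-byte tail is split into pextrd/movd pieces, which is
also why the second paddw constant is <1,1,1,1,1,1,u,u>: only 6 of the upper
8 i16 lanes of the v14i16 input are real. A hypothetical reduction showing
just the split store (not part of this commit):

define void @store7(<7 x i32>* %dst, <7 x i32> %v) nounwind {
  ; a 28-byte store lowers to one 16-byte store plus pextrd/movd for the tail
  store <7 x i32> %v, <7 x i32>* %dst
  ret void
}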

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-3.ll?rev=305655&r1=305654&r2=305655&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-3.ll Sun Jun 18 16:42:19 2017
@@ -1,28 +1,27 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
 
 ; bitcast v12i8 to v3i32
 
 define void @convert(<12 x i8>* %dst.addr, <3 x i32> %src) nounwind {
 ; X86-LABEL: convert:
-; X86:       ## BB#0: ## %entry
+; X86:       # BB#0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    paddd LCPI0_0, %xmm0
+; X86-NEXT:    paddd {{\.LCPI.*}}, %xmm0
 ; X86-NEXT:    pextrd $2, %xmm0, 8(%eax)
 ; X86-NEXT:    pextrd $1, %xmm0, 4(%eax)
 ; X86-NEXT:    movd %xmm0, (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: convert:
-; X64:       ## BB#0: ## %entry
+; X64:       # BB#0:
 ; X64-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; X64-NEXT:    pextrd $2, %xmm0, 8(%rdi)
 ; X64-NEXT:    movq %xmm0, (%rdi)
 ; X64-NEXT:    retq
-entry:
-	%add = add <3 x i32> %src, < i32 1, i32 1, i32 1 >		; <<3 x i32>> [#uses=1]
-	%conv = bitcast <3 x i32> %add to <12 x i8>		; <<12 x i8>> [#uses=1]
+	%add = add <3 x i32> %src, < i32 1, i32 1, i32 1 >
+	%conv = bitcast <3 x i32> %add to <12 x i8>
 	store <12 x i8> %conv, <12 x i8>* %dst.addr
 	ret void
 }
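
Note the check-line change from "paddd LCPI0_0, %xmm0" to
"paddd {{\.LCPI.*}}, %xmm0": under an ELF triple the constant-pool entry
becomes a local ".LCPI" label, and the regex keeps the test from depending on
the exact label index. A minimal sketch of the same pattern (illustrative
only, not part of this commit):

; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s
define <4 x i32> @inc4(<4 x i32> %x) nounwind {
; CHECK-LABEL: inc4:
; CHECK:    paddd {{\.LCPI.*}}, %xmm0
  ; the <1,1,1,1> splat is materialized as a constant-pool load
  %r = add <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %r
}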

Modified: llvm/trunk/test/CodeGen/X86/widen_cast-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-4.ll?rev=305655&r1=305654&r2=305655&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-4.ll Sun Jun 18 16:42:19 2017
@@ -1,78 +1,135 @@
-; RUN: llc < %s -march=x86 -mattr=+sse4.2 | FileCheck %s
-; RUN: llc < %s -march=x86 -mattr=+sse4.2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=NARROW
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=WIDE
+
+; FIXME: We shouldn't require both a movd and an insert in the wide version.
 
 define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
-; CHECK-LABEL: update:
-; CHECK-WIDE-LABEL: update:
+; NARROW-LABEL: update:
+; NARROW:       # BB#0: # %entry
+; NARROW-NEXT:    subl $12, %esp
+; NARROW-NEXT:    movl $0, (%esp)
+; NARROW-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
+; NARROW-NEXT:    movdqa {{.*#+}} xmm1 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; NARROW-NEXT:    jmp .LBB0_1
+; NARROW-NEXT:    .p2align 4, 0x90
+; NARROW-NEXT:  .LBB0_2: # %forbody
+; NARROW-NEXT:    # in Loop: Header=BB0_1 Depth=1
+; NARROW-NEXT:    movl (%esp), %eax
+; NARROW-NEXT:    shll $3, %eax
+; NARROW-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; NARROW-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; NARROW-NEXT:    movl (%esp), %eax
+; NARROW-NEXT:    shll $3, %eax
+; NARROW-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; NARROW-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; NARROW-NEXT:    movl (%esp), %ecx
+; NARROW-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; NARROW-NEXT:    pmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; NARROW-NEXT:    paddw %xmm0, %xmm2
+; NARROW-NEXT:    psllw $8, %xmm2
+; NARROW-NEXT:    psraw $8, %xmm2
+; NARROW-NEXT:    psraw $2, %xmm2
+; NARROW-NEXT:    pshufb %xmm1, %xmm2
+; NARROW-NEXT:    movq %xmm2, (%edx,%ecx,8)
+; NARROW-NEXT:    incl (%esp)
+; NARROW-NEXT:  .LBB0_1: # %forcond
+; NARROW-NEXT:    # =>This Inner Loop Header: Depth=1
+; NARROW-NEXT:    movl (%esp), %eax
+; NARROW-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; NARROW-NEXT:    jl .LBB0_2
+; NARROW-NEXT:  # BB#3: # %afterfor
+; NARROW-NEXT:    addl $12, %esp
+; NARROW-NEXT:    retl
+;
+; WIDE-LABEL: update:
+; WIDE:       # BB#0: # %entry
+; WIDE-NEXT:    subl $12, %esp
+; WIDE-NEXT:    movl $0, (%esp)
+; WIDE-NEXT:    movdqa {{.*#+}} xmm0 = <1,1,1,1,1,1,1,1,u,u,u,u,u,u,u,u>
+; WIDE-NEXT:    movdqa {{.*#+}} xmm1 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; WIDE-NEXT:    movdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; WIDE-NEXT:    jmp .LBB0_1
+; WIDE-NEXT:    .p2align 4, 0x90
+; WIDE-NEXT:  .LBB0_2: # %forbody
+; WIDE-NEXT:    # in Loop: Header=BB0_1 Depth=1
+; WIDE-NEXT:    movl (%esp), %eax
+; WIDE-NEXT:    shll $3, %eax
+; WIDE-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; WIDE-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; WIDE-NEXT:    movl (%esp), %eax
+; WIDE-NEXT:    shll $3, %eax
+; WIDE-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; WIDE-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; WIDE-NEXT:    movl (%esp), %ecx
+; WIDE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; WIDE-NEXT:    movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; WIDE-NEXT:    pinsrd $1, 4(%eax,%ecx,8), %xmm3
+; WIDE-NEXT:    paddb %xmm0, %xmm3
+; WIDE-NEXT:    psrlw $2, %xmm3
+; WIDE-NEXT:    pand %xmm1, %xmm3
+; WIDE-NEXT:    pxor %xmm2, %xmm3
+; WIDE-NEXT:    psubb %xmm2, %xmm3
+; WIDE-NEXT:    pextrd $1, %xmm3, 4(%edx,%ecx,8)
+; WIDE-NEXT:    movd %xmm3, (%edx,%ecx,8)
+; WIDE-NEXT:    incl (%esp)
+; WIDE-NEXT:  .LBB0_1: # %forcond
+; WIDE-NEXT:    # =>This Inner Loop Header: Depth=1
+; WIDE-NEXT:    movl (%esp), %eax
+; WIDE-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; WIDE-NEXT:    jl .LBB0_2
+; WIDE-NEXT:  # BB#3: # %afterfor
+; WIDE-NEXT:    addl $12, %esp
+; WIDE-NEXT:    retl
 entry:
-	%dst_i.addr = alloca i64*		; <i64**> [#uses=2]
-	%src_i.addr = alloca i64*		; <i64**> [#uses=2]
-	%n.addr = alloca i32		; <i32*> [#uses=2]
-	%i = alloca i32, align 4		; <i32*> [#uses=8]
-	%dst = alloca <8 x i8>*, align 4		; <<8 x i8>**> [#uses=2]
-	%src = alloca <8 x i8>*, align 4		; <<8 x i8>**> [#uses=2]
+	%dst_i.addr = alloca i64*
+	%src_i.addr = alloca i64*
+	%n.addr = alloca i32
+	%i = alloca i32, align 4
+	%dst = alloca <8 x i8>*, align 4
+	%src = alloca <8 x i8>*, align 4
 	store i64* %dst_i, i64** %dst_i.addr
 	store i64* %src_i, i64** %src_i.addr
 	store i32 %n, i32* %n.addr
 	store i32 0, i32* %i
 	br label %forcond
 
-forcond:		; preds = %forinc, %entry
-	%tmp = load i32, i32* %i		; <i32> [#uses=1]
-	%tmp1 = load i32, i32* %n.addr		; <i32> [#uses=1]
-	%cmp = icmp slt i32 %tmp, %tmp1		; <i1> [#uses=1]
+forcond:
+	%tmp = load i32, i32* %i
+	%tmp1 = load i32, i32* %n.addr
+	%cmp = icmp slt i32 %tmp, %tmp1
 	br i1 %cmp, label %forbody, label %afterfor
 
-forbody:		; preds = %forcond
-	%tmp2 = load i32, i32* %i		; <i32> [#uses=1]
-	%tmp3 = load i64*, i64** %dst_i.addr		; <i64*> [#uses=1]
-	%arrayidx = getelementptr i64, i64* %tmp3, i32 %tmp2		; <i64*> [#uses=1]
-	%conv = bitcast i64* %arrayidx to <8 x i8>*		; <<8 x i8>*> [#uses=1]
+forbody:
+	%tmp2 = load i32, i32* %i
+	%tmp3 = load i64*, i64** %dst_i.addr
+	%arrayidx = getelementptr i64, i64* %tmp3, i32 %tmp2
+	%conv = bitcast i64* %arrayidx to <8 x i8>*
 	store <8 x i8>* %conv, <8 x i8>** %dst
-	%tmp4 = load i32, i32* %i		; <i32> [#uses=1]
-	%tmp5 = load i64*, i64** %src_i.addr		; <i64*> [#uses=1]
-	%arrayidx6 = getelementptr i64, i64* %tmp5, i32 %tmp4		; <i64*> [#uses=1]
-	%conv7 = bitcast i64* %arrayidx6 to <8 x i8>*		; <<8 x i8>*> [#uses=1]
+	%tmp4 = load i32, i32* %i
+	%tmp5 = load i64*, i64** %src_i.addr
+	%arrayidx6 = getelementptr i64, i64* %tmp5, i32 %tmp4
+	%conv7 = bitcast i64* %arrayidx6 to <8 x i8>*
 	store <8 x i8>* %conv7, <8 x i8>** %src
-	%tmp8 = load i32, i32* %i		; <i32> [#uses=1]
-	%tmp9 = load <8 x i8>*, <8 x i8>** %dst		; <<8 x i8>*> [#uses=1]
-	%arrayidx10 = getelementptr <8 x i8>, <8 x i8>* %tmp9, i32 %tmp8		; <<8 x i8>*> [#uses=1]
-	%tmp11 = load i32, i32* %i		; <i32> [#uses=1]
-	%tmp12 = load <8 x i8>*, <8 x i8>** %src		; <<8 x i8>*> [#uses=1]
-	%arrayidx13 = getelementptr <8 x i8>, <8 x i8>* %tmp12, i32 %tmp11		; <<8 x i8>*> [#uses=1]
-	%tmp14 = load <8 x i8>, <8 x i8>* %arrayidx13		; <<8 x i8>> [#uses=1]
-	%add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 >		; <<8 x i8>> [#uses=1]
-	%shr = ashr <8 x i8> %add, < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 >		; <<8 x i8>> [#uses=1]
+	%tmp8 = load i32, i32* %i
+	%tmp9 = load <8 x i8>*, <8 x i8>** %dst
+	%arrayidx10 = getelementptr <8 x i8>, <8 x i8>* %tmp9, i32 %tmp8
+	%tmp11 = load i32, i32* %i
+	%tmp12 = load <8 x i8>*, <8 x i8>** %src
+	%arrayidx13 = getelementptr <8 x i8>, <8 x i8>* %tmp12, i32 %tmp11
+	%tmp14 = load <8 x i8>, <8 x i8>* %arrayidx13
+	%add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 >
+	%shr = ashr <8 x i8> %add, < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 >
 	store <8 x i8> %shr, <8 x i8>* %arrayidx10
 	br label %forinc
-; CHECK: %forbody
-; CHECK:      pmovzxbw
-; CHECK-NEXT: paddw
-; CHECK-NEXT: psllw $8
-; CHECK-NEXT: psraw $8
-; CHECK-NEXT: psraw $2
-; CHECK-NEXT: pshufb
-; CHECK-NEXT: movq
-;
-; FIXME: We shouldn't require both a movd and an insert.
-; CHECK-WIDE: %forbody
-; CHECK-WIDE:      movd
-; CHECK-WIDE-NEXT: pinsrd
-; CHECK-WIDE-NEXT: paddb
-; CHECK-WIDE-NEXT: psrlw $2
-; CHECK-WIDE-NEXT: pand
-; CHECK-WIDE-NEXT: pxor
-; CHECK-WIDE-NEXT: psubb
-; CHECK-WIDE-NEXT: pextrd
-; CHECK-WIDE-NEXT: movd
-
-forinc:		; preds = %forbody
-	%tmp15 = load i32, i32* %i		; <i32> [#uses=1]
-	%inc = add i32 %tmp15, 1		; <i32> [#uses=1]
+
+forinc:
+	%tmp15 = load i32, i32* %i
+	%inc = add i32 %tmp15, 1
 	store i32 %inc, i32* %i
 	br label %forcond
 
-afterfor:		; preds = %forcond
+afterfor:
 	ret void
 }
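
Both check prefixes above are working around the same gap: SSE has no 8-bit
arithmetic shift instruction, so "ashr <8 x i8>" must be emulated. The NARROW
output sign-extends the bytes into words first (psllw $8 / psraw $8) before
shifting, while the WIDE output uses the unsigned-shift-plus-sign-fixup idiom
(psrlw / pand / pxor / psubb). A hypothetical reduction of the loop body (not
part of this commit):

define <8 x i8> @ashr2(<8 x i8> %x) nounwind {
  %add = add <8 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ; no byte-wide psra exists, so this ashr forces the emulation sequences
  %shr = ashr <8 x i8> %add, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <8 x i8> %shr
}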
 



