[llvm] r229296 - [x86] Update some tests with the latest version of my script and llc.
Chandler Carruth
chandlerc at gmail.com
Sun Feb 15 01:26:15 PST 2015
Author: chandlerc
Date: Sun Feb 15 03:26:15 2015
New Revision: 229296
URL: http://llvm.org/viewvc/llvm-project?rev=229296&view=rev
Log:
[x86] Update some tests with the latest version of my script and llc.
This mostly adds some shuffle decode comments and cleans up indentation.
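For context on what the new comments look like: the script decodes fixed-pattern shuffle and zero-extension instructions into an explicit per-lane mapping, so the expected data movement is visible directly in the CHECK line. For example, a check like

  ; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero

says the result's lanes are element 0 of xmm0, then zero, then element 1 of xmm0, then zero, and so on (i.e. a word-to-dword zero extension), while the {{.*#+}} FileCheck regex skips the raw operands and anchors on the '#' assembly comment that carries the decode.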
Modified:
llvm/trunk/test/CodeGen/X86/combine-or.ll
llvm/trunk/test/CodeGen/X86/vector-zext.ll
Modified: llvm/trunk/test/CodeGen/X86/combine-or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-or.ll?rev=229296&r1=229295&r2=229296&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-or.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-or.ll Sun Feb 15 03:26:15 2015
@@ -254,7 +254,7 @@ define <2 x i64> @test20(<2 x i64> %a, <
; CHECK-LABEL: test20:
; CHECK: # BB#0:
; CHECK-NEXT: orps %xmm1, %xmm0
-; CHECK-NEXT: movq %xmm0, %xmm0
+; CHECK-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: retq
%shuf1 = shufflevector <2 x i64> %a, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
%shuf2 = shufflevector <2 x i64> %b, <2 x i64> zeroinitializer, <2 x i32><i32 0, i32 2>
Modified: llvm/trunk/test/CodeGen/X86/vector-zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-zext.ll?rev=229296&r1=229295&r2=229296&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-zext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-zext.ll Sun Feb 15 03:26:15 2015
@@ -29,7 +29,7 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
;
; SSE41-LABEL: zext_8i16_to_8i32:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxwd %xmm0, %xmm2
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
@@ -41,13 +41,13 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX1-NEXT: vpmovzxwd %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i32:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: retq
entry:
%B = zext <8 x i16> %A to <8 x i32>
@@ -77,7 +77,7 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
;
; SSE41-LABEL: zext_4i32_to_4i64:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxdq %xmm0, %xmm2
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE41-NEXT: pand %xmm3, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
@@ -89,13 +89,13 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT: vpmovzxdq %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_4i32_to_4i64:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: retq
entry:
%B = zext <4 x i32> %A to <4 x i64>
@@ -127,7 +127,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
;
; SSE41-LABEL: zext_8i8_to_8i32:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxwd %xmm0, %xmm2
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
@@ -137,7 +137,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
;
; AVX1-LABEL: zext_8i8_to_8i32:
; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpmovzxwd %xmm0, %xmm1
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
@@ -145,7 +145,7 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x
;
; AVX2-LABEL: zext_8i8_to_8i32:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxwd %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -180,7 +180,7 @@ define <16 x i16> @zext_16i8_to_16i16(<1
;
; SSE41-LABEL: zext_16i8_to_16i16:
; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxbw %xmm0, %xmm2
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
@@ -192,13 +192,13 @@ define <16 x i16> @zext_16i8_to_16i16(<1
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmovzxbw %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i16:
; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: retq
entry:
%t = zext <16 x i8> %z to <16 x i16>
@@ -207,43 +207,43 @@ entry:
define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-LABEL: load_zext_16i8_to_16i16:
-; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpcklbw %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: punpckhbw %xmm1, %xmm1 # xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: retq
-
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
; SSSE3-LABEL: load_zext_16i8_to_16i16:
-; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movdqa (%rdi), %xmm1
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpcklbw %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSSE3-NEXT: pand %xmm2, %xmm0
-; SSSE3-NEXT: punpckhbw %xmm1, %xmm1 # xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSSE3-NEXT: pand %xmm2, %xmm1
-; SSSE3-NEXT: retq
-
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: retq
+;
; SSE41-LABEL: load_zext_16i8_to_16i16:
-; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxbw (%rdi), %xmm0
-; SSE41-NEXT: pmovzxbw 8(%rdi), %xmm1
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SSE41-NEXT: retq
-
+;
; AVX1-LABEL: load_zext_16i8_to_16i16:
-; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpmovzxbw (%rdi), %xmm0
-; AVX1-NEXT: vpmovzxbw 8(%rdi), %xmm1
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
-
+;
; AVX2-LABEL: load_zext_16i8_to_16i16:
-; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxbw (%rdi), %ymm0
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX2-NEXT: retq
entry:
%X = load <16 x i8>* %ptr
@@ -253,43 +253,43 @@ entry:
define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSE2-LABEL: load_zext_8i16_to_8i32:
-; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: punpckhwd %xmm1, %xmm1 # xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: retq
-
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
; SSSE3-LABEL: load_zext_8i16_to_8i32:
-; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movdqa (%rdi), %xmm1
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
-; SSSE3-NEXT: pand %xmm2, %xmm0
-; SSSE3-NEXT: punpckhwd %xmm1, %xmm1 # xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: pand %xmm2, %xmm1
-; SSSE3-NEXT: retq
-
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: retq
+;
; SSE41-LABEL: load_zext_8i16_to_8i32:
-; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxwd (%rdi), %xmm0
-; SSE41-NEXT: pmovzxwd 8(%rdi), %xmm1
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; SSE41-NEXT: retq
-
+;
; AVX1-LABEL: load_zext_8i16_to_8i32:
-; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpmovzxwd (%rdi), %xmm0
-; AVX1-NEXT: vpmovzxwd 8(%rdi), %xmm1
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
-
+;
; AVX2-LABEL: load_zext_8i16_to_8i32:
-; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxwd (%rdi), %ymm0
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: retq
entry:
%X = load <8 x i16>* %ptr
@@ -299,41 +299,41 @@ entry:
define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSE2-LABEL: load_zext_4i32_to_4i64:
-; SSE2: # BB#0: # %entry
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: pshufd $212, %xmm1, %xmm0 # xmm0 = xmm1[0,1,1,3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pshufd $250, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: retq
-
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
; SSSE3-LABEL: load_zext_4i32_to_4i64:
-; SSSE3: # BB#0: # %entry
-; SSSE3-NEXT: movdqa (%rdi), %xmm1
-; SSSE3-NEXT: pshufd $212, %xmm1, %xmm0 # xmm0 = xmm1[0,1,1,3]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
-; SSSE3-NEXT: pand %xmm2, %xmm0
-; SSSE3-NEXT: pshufd $250, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
-; SSSE3-NEXT: pand %xmm2, %xmm1
-; SSSE3-NEXT: retq
-
+; SSSE3: # BB#0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: retq
+;
; SSE41-LABEL: load_zext_4i32_to_4i64:
-; SSE41: # BB#0: # %entry
-; SSE41-NEXT: pmovzxdq (%rdi), %xmm0
-; SSE41-NEXT: pmovzxdq 8(%rdi), %xmm1
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; SSE41-NEXT: retq
-
+;
; AVX1-LABEL: load_zext_4i32_to_4i64:
-; AVX1: # BB#0: # %entry
-; AVX1-NEXT: vpmovzxdq (%rdi), %xmm0
-; AVX1-NEXT: vpmovzxdq 8(%rdi), %xmm1
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
-
+;
; AVX2-LABEL: load_zext_4i32_to_4i64:
-; AVX2: # BB#0: # %entry
-; AVX2-NEXT: vpmovzxdq (%rdi), %ymm0
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX2-NEXT: retq
entry:
%X = load <4 x i32>* %ptr