[llvm] r246927 - [X86] Updated vector lzcnt tests. Added missing vec512 tests.

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sat Sep 5 04:56:31 PDT 2015


Author: rksimon
Date: Sat Sep  5 06:56:30 2015
New Revision: 246927

URL: http://llvm.org/viewvc/llvm-project?rev=246927&view=rev
Log:
[X86] Updated vector lzcnt tests. Added missing vec512 tests.

Modified:
    llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll
    llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll
    llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll
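
For context: every test in these files wraps the llvm.ctlz intrinsic, whose second
argument selects whether a zero input is undefined (i1 -1, the *u variants) or must
return the element bit width (i1 0). A minimal sketch of the shape the new vec512
tests take, condensed from the vector-lzcnt-512.ll diff below (nothing here is
invented; the function name and check prefix are taken from that diff) - with
-mcpu=knl -mattr=+avx512cd the whole call lowers to a single vplzcntd:

    declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)

    define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
    ; ALL-LABEL: testv16i32:
    ; ALL:       vplzcntd %zmm0, %zmm0
      ; i1 0: a zero element is defined and yields the bit width (32)
      %out = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %in, i1 0)
      ret <16 x i32> %out
    }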

Modified: llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll?rev=246927&r1=246926&r2=246927&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll Sat Sep  5 06:56:30 2015
@@ -1,12 +1,10 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl| FileCheck %s --check-prefix=AVX512VLCD
-
-target triple = "x86_64-unknown-unknown"
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s --check-prefix=ALL  --check-prefix=AVX512 --check-prefix=AVX512VLCD
 
 define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
 ; SSE2-LABEL: testv2i64:
@@ -94,11 +92,12 @@ define <2 x i64> @testv2i64(<2 x i64> %i
 ; AVX-NEXT:    vmovq %rax, %xmm0
 ; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: testv2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vplzcntq %xmm0, %xmm0
+; AVX512-NEXT:    retq
 
-; AVX512VLCD-LABEL: testv2i64:
-; AVX512VLCD:       ## BB#0:
-; AVX512VLCD-NEXT:    vplzcntq %xmm0, %xmm0
-; AVX512VLCD-NEXT:    retq
   %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 0)
   ret <2 x i64> %out
 }
@@ -174,11 +173,12 @@ define <2 x i64> @testv2i64u(<2 x i64> %
 ; AVX-NEXT:    vmovq %rax, %xmm0
 ; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: testv2i64u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vplzcntq %xmm0, %xmm0
+; AVX512-NEXT:    retq
 
-; AVX512VLCD-LABEL: testv2i64u:
-; AVX512VLCD:       ## BB#0:
-; AVX512VLCD-NEXT:    vplzcntq %xmm0, %xmm0
-; AVX512VLCD-NEXT:    retq
   %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %in, i1 -1)
   ret <2 x i64> %out
 }
@@ -330,11 +330,12 @@ define <4 x i32> @testv4i32(<4 x i32> %i
 ; AVX-NEXT:    xorl $31, %eax
 ; AVX-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: testv4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vplzcntd %xmm0, %xmm0
+; AVX512-NEXT:    retq
 
-; AVX512VLCD-LABEL: testv4i32:
-; AVX512VLCD:       ## BB#0:
-; AVX512VLCD-NEXT:    vplzcntd %xmm0, %xmm0
-; AVX512VLCD-NEXT:    retq
   %out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %in, i1 0)
   ret <4 x i32> %out
 }
@@ -461,11 +462,12 @@ define <4 x i32> @testv4i32u(<4 x i32> %
 ; AVX-NEXT:    xorl $31, %eax
 ; AVX-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: testv4i32u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vplzcntd %xmm0, %xmm0
+; AVX512-NEXT:    retq
 
-; AVX512VLCD-LABEL: testv4i32u:
-; AVX512VLCD:       ## BB#0:
-; AVX512VLCD-NEXT:    vplzcntd %xmm0, %xmm0
-; AVX512VLCD-NEXT:    retq
   %out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %in, i1 -1)
   ret <4 x i32> %out
 }
@@ -717,6 +719,34 @@ define <8 x i16> @testv8i16(<8 x i16> %i
 ; AVX-NEXT:    xorl $15, %ecx
 ; AVX-NEXT:    vpinsrw $7, %ecx, %xmm1, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: testv8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vmovd %xmm0, %ecx
+; AVX512-NEXT:    lzcntw %cx, %cx
+; AVX512-NEXT:    vmovd %ecx, %xmm1
+; AVX512-NEXT:    vpinsrw $1, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $2, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $3, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $5, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 0)
   ret <8 x i16> %out
 }
@@ -923,6 +953,34 @@ define <8 x i16> @testv8i16u(<8 x i16> %
 ; AVX-NEXT:    xorl $15, %eax
 ; AVX-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: testv8i16u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vmovd %xmm0, %ecx
+; AVX512-NEXT:    lzcntw %cx, %cx
+; AVX512-NEXT:    vmovd %ecx, %xmm1
+; AVX512-NEXT:    vpinsrw $1, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $2, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $3, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $5, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 -1)
   ret <8 x i16> %out
 }
@@ -1413,6 +1471,74 @@ define <16 x i8> @testv16i8(<16 x i8> %i
 ; AVX-NEXT:    xorl $7, %ecx
 ; AVX-NEXT:    vpinsrb $15, %ecx, %xmm1, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: testv16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpextrb $0, %xmm0, %ecx
+; AVX512-NEXT:    lzcntl %ecx, %ecx
+; AVX512-NEXT:    addl $-24, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm1
+; AVX512-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 0)
   ret <16 x i8> %out
 }
@@ -1812,6 +1938,74 @@ define <16 x i8> @testv16i8u(<16 x i8> %
 ; AVX-NEXT:    xorl $7, %eax
 ; AVX-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: testv16i8u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpextrb $0, %xmm0, %ecx
+; AVX512-NEXT:    lzcntl %ecx, %ecx
+; AVX512-NEXT:    addl $-24, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm1
+; AVX512-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 -1)
   ret <16 x i8> %out
 }
@@ -1828,6 +2022,12 @@ define <2 x i64> @foldv2i64() nounwind {
 ; AVX-NEXT:    movl $55, %eax
 ; AVX-NEXT:    vmovq %rax, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    movl $55, %eax
+; AVX512-NEXT:    vmovq %rax, %xmm0
+; AVX512-NEXT:    retq
   %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
   ret <2 x i64> %out
 }
@@ -1844,6 +2044,12 @@ define <2 x i64> @foldv2i64u() nounwind
 ; AVX-NEXT:    movl $55, %eax
 ; AVX-NEXT:    vmovq %rax, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv2i64u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    movl $55, %eax
+; AVX512-NEXT:    vmovq %rax, %xmm0
+; AVX512-NEXT:    retq
   %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
   ret <2 x i64> %out
 }
@@ -1858,6 +2064,11 @@ define <4 x i32> @foldv4i32() nounwind {
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [23,0,32,24]
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa32 {{.*}}(%rip), %xmm0
+; AVX512-NEXT:    retq
   %out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 0)
   ret <4 x i32> %out
 }
@@ -1872,6 +2083,11 @@ define <4 x i32> @foldv4i32u() nounwind
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [23,0,32,24]
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv4i32u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa32 {{.*}}(%rip), %xmm0
+; AVX512-NEXT:    retq
   %out = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> <i32 256, i32 -1, i32 0, i32 255>, i1 -1)
   ret <4 x i32> %out
 }
@@ -1886,6 +2102,11 @@ define <8 x i16> @foldv8i16() nounwind {
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0
+; AVX512-NEXT:    retq
   %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 0)
   ret <8 x i16> %out
 }
@@ -1900,6 +2121,11 @@ define <8 x i16> @foldv8i16u() nounwind
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9]
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv8i16u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0
+; AVX512-NEXT:    retq
   %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88>, i1 -1)
   ret <8 x i16> %out
 }
@@ -1914,6 +2140,11 @@ define <16 x i8> @foldv16i8() nounwind {
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0
+; AVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 0)
   ret <16 x i8> %out
 }
@@ -1928,6 +2159,11 @@ define <16 x i8> @foldv16i8u() nounwind
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2]
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv16i8u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0
+; AVX512-NEXT:    retq
   %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32>, i1 -1)
   ret <16 x i8> %out
 }
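
A note on the scalarized i8 checks above: x86 has no 8-bit lzcnt, so each byte is
extracted with vpextrb (which zero-extends into a 32-bit register), counted with
lzcntl, and corrected with addl $-24, since the zero extension contributes exactly
24 guaranteed leading zeros: lzcnt32(zext8(x)) - 24 == ctlz8(x). For example, x = 1
zero-extends to 0x00000001, lzcntl gives 31, and 31 - 24 = 7, the correct 8-bit
count; vpinsrb then rebuilds the vector. The i16 cases can use lzcntw directly, so
they need no adjustment.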

Modified: llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll?rev=246927&r1=246926&r2=246927&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll Sat Sep  5 06:56:30 2015
@@ -1,8 +1,6 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl| FileCheck %s --check-prefix=AVX512VLCD
-
-target triple = "x86_64-unknown-unknown"
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl| FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VLCD
 
 define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
 ; AVX1-LABEL: testv4i64:
@@ -62,11 +60,12 @@ define <4 x i64> @testv4i64(<4 x i64> %i
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: testv4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vplzcntq %ymm0, %ymm0
+; AVX512-NEXT:    retq
 
-; AVX512VLCD-LABEL: testv4i64:
-; AVX512VLCD:       ## BB#0:
-; AVX512VLCD-NEXT:    vplzcntq %ymm0, %ymm0
-; AVX512VLCD-NEXT:    retq
   %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 0)
   ret <4 x i64> %out
 }
@@ -119,11 +118,12 @@ define <4 x i64> @testv4i64u(<4 x i64> %
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: testv4i64u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vplzcntq %ymm0, %ymm0
+; AVX512-NEXT:    retq
 
-; AVX512VLCD-LABEL: testv4i64u:
-; AVX512VLCD:       ## BB#0:
-; AVX512VLCD-NEXT:    vplzcntq %ymm0, %ymm0
-; AVX512VLCD-NEXT:    retq
   %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 -1)
   ret <4 x i64> %out
 }
@@ -222,11 +222,12 @@ define <8 x i32> @testv8i32(<8 x i32> %i
 ; AVX2-NEXT:    vpinsrd $3, %ecx, %xmm2, %xmm0
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: testv8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vplzcntd %ymm0, %ymm0
+; AVX512-NEXT:    retq
 
-; AVX512VLCD-LABEL: testv8i32:
-; AVX512VLCD:       ## BB#0:
-; AVX512VLCD-NEXT:    vplzcntd %ymm0, %ymm0
-; AVX512VLCD-NEXT:    retq
   %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 0)
   ret <8 x i32> %out
 }
@@ -307,11 +308,12 @@ define <8 x i32> @testv8i32u(<8 x i32> %
 ; AVX2-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: testv8i32u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vplzcntd %ymm0, %ymm0
+; AVX512-NEXT:    retq
 
-; AVX512VLCD-LABEL: testv8i32u:
-; AVX512VLCD:       ## BB#0:
-; AVX512VLCD-NEXT:    vplzcntd %ymm0, %ymm0
-; AVX512VLCD-NEXT:    retq
   %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 -1)
   ret <8 x i32> %out
 }
@@ -490,6 +492,60 @@ define <16 x i16> @testv16i16(<16 x i16>
 ; AVX2-NEXT:    vpinsrw $7, %ecx, %xmm2, %xmm0
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: testv16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpextrw $1, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vmovd %xmm1, %ecx
+; AVX512-NEXT:    lzcntw %cx, %cx
+; AVX512-NEXT:    vmovd %ecx, %xmm2
+; AVX512-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $2, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $3, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $4, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $5, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $6, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $7, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm1
+; AVX512-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vmovd %xmm0, %ecx
+; AVX512-NEXT:    lzcntw %cx, %cx
+; AVX512-NEXT:    vmovd %ecx, %xmm2
+; AVX512-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 0)
   ret <16 x i16> %out
 }
@@ -634,6 +690,60 @@ define <16 x i16> @testv16i16u(<16 x i16
 ; AVX2-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm0
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: testv16i16u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpextrw $1, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vmovd %xmm1, %ecx
+; AVX512-NEXT:    lzcntw %cx, %cx
+; AVX512-NEXT:    vmovd %ecx, %xmm2
+; AVX512-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $2, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $3, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $4, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $5, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $6, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $7, %xmm1, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm1
+; AVX512-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vmovd %xmm0, %ecx
+; AVX512-NEXT:    lzcntw %cx, %cx
+; AVX512-NEXT:    vmovd %ecx, %xmm2
+; AVX512-NEXT:    vpinsrw $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $3, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $4, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $6, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX512-NEXT:    lzcntw %ax, %ax
+; AVX512-NEXT:    vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 -1)
   ret <16 x i16> %out
 }
@@ -972,6 +1082,140 @@ define <32 x i8> @testv32i8(<32 x i8> %i
 ; AVX2-NEXT:    vpinsrb $15, %ecx, %xmm2, %xmm0
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: testv32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpextrb $1, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpextrb $0, %xmm1, %ecx
+; AVX512-NEXT:    lzcntl %ecx, %ecx
+; AVX512-NEXT:    addl $-24, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm2
+; AVX512-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $2, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $3, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $4, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $5, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $6, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $7, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $8, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $9, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $10, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $11, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $12, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $13, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $14, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $15, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX512-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpextrb $0, %xmm0, %ecx
+; AVX512-NEXT:    lzcntl %ecx, %ecx
+; AVX512-NEXT:    addl $-24, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm2
+; AVX512-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
   ret <32 x i8> %out
 }
@@ -1244,78 +1488,252 @@ define <32 x i8> @testv32i8u(<32 x i8> %
 ; AVX2-NEXT:    vpinsrb $15, %eax, %xmm2, %xmm0
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: testv32i8u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpextrb $1, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpextrb $0, %xmm1, %ecx
+; AVX512-NEXT:    lzcntl %ecx, %ecx
+; AVX512-NEXT:    addl $-24, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm2
+; AVX512-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $2, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $3, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $4, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $5, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $6, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $7, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $8, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $9, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $10, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $11, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $12, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $13, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $14, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $15, %xmm1, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX512-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpextrb $0, %xmm0, %ecx
+; AVX512-NEXT:    lzcntl %ecx, %ecx
+; AVX512-NEXT:    addl $-24, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm2
+; AVX512-NEXT:    vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX512-NEXT:    lzcntl %eax, %eax
+; AVX512-NEXT:    addl $-24, %eax
+; AVX512-NEXT:    vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
   ret <32 x i8> %out
 }
 
 define <4 x i64> @foldv4i64() nounwind {
-; ALL-LABEL: foldv4i64:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [55,0,64,56]
-; ALL-NEXT:    retq
+; AVX-LABEL: foldv4i64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %ymm0
+; AVX512-NEXT:    retq
   %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
   ret <4 x i64> %out
 }
 
 define <4 x i64> @foldv4i64u() nounwind {
-; ALL-LABEL: foldv4i64u:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [55,0,64,56]
-; ALL-NEXT:    retq
+; AVX-LABEL: foldv4i64u:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [55,0,64,56]
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv4i64u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %ymm0
+; AVX512-NEXT:    retq
   %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
   ret <4 x i64> %out
 }
 
 define <8 x i32> @foldv8i32() nounwind {
-; ALL-LABEL: foldv8i32:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; ALL-NEXT:    retq
+; AVX-LABEL: foldv8i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa32 {{.*}}(%rip), %ymm0
+; AVX512-NEXT:    retq
   %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
   ret <8 x i32> %out
 }
 
 define <8 x i32> @foldv8i32u() nounwind {
-; ALL-LABEL: foldv8i32u:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; ALL-NEXT:    retq
+; AVX-LABEL: foldv8i32u:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv8i32u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa32 {{.*}}(%rip), %ymm0
+; AVX512-NEXT:    retq
   %out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
   ret <8 x i32> %out
 }
 
 define <16 x i16> @foldv16i16() nounwind {
-; ALL-LABEL: foldv16i16:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; ALL-NEXT:    retq
+; AVX-LABEL: foldv16i16:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %ymm0
+; AVX512-NEXT:    retq
   %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
   ret <16 x i16> %out
 }
 
 define <16 x i16> @foldv16i16u() nounwind {
-; ALL-LABEL: foldv16i16u:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; ALL-NEXT:    retq
+; AVX-LABEL: foldv16i16u:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv16i16u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %ymm0
+; AVX512-NEXT:    retq
   %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
   ret <16 x i16> %out
 }
 
 define <32 x i8> @foldv32i8() nounwind {
-; ALL-LABEL: foldv32i8:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; ALL-NEXT:    retq
+; AVX-LABEL: foldv32i8:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %ymm0
+; AVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
   ret <32 x i8> %out
 }
 
 define <32 x i8> @foldv32i8u() nounwind {
-; ALL-LABEL: foldv32i8u:
-; ALL:       # BB#0:
-; ALL-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; ALL-NEXT:    retq
+; AVX-LABEL: foldv32i8u:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: foldv32i8u:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa64 {{.*}}(%rip), %ymm0
+; AVX512-NEXT:    retq
   %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
   ret <32 x i8> %out
 }

Modified: llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll?rev=246927&r1=246926&r2=246927&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-lzcnt-512.ll Sat Sep  5 06:56:30 2015
@@ -1,40 +1,794 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=AVX512CD
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512CD
 
-define <16 x i32> @test_ctlz_d(<16 x i32> %a) {
-; AVX512CD-LABEL: test_ctlz_d:
-; AVX512CD:       ## BB#0:
-; AVX512CD-NEXT:    vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT:    retq
-  %res = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 false)
-  ret <16 x i32> %res
-}
-
-define <8 x i64> @test_ctlz_q(<8 x i64> %a) {
-; AVX512CD-LABEL: test_ctlz_q:
-; AVX512CD:       ## BB#0:
-; AVX512CD-NEXT:    vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT:    retq
-  %res = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 false)
-  ret <8 x i64> %res
-}
-
-define <16 x i32> @test_ctlz_d_undef(<16 x i32> %a) {
-; AVX512CD-LABEL: test_ctlz_d_undef:
-; AVX512CD:       ## BB#0:
-; AVX512CD-NEXT:    vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT:    retq
-  %res = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %a, i1 -1)
-  ret <16 x i32> %res
-}
-
-define <8 x i64> @test_ctlz_q_undef(<8 x i64> %a) {
-; AVX512CD-LABEL: test_ctlz_q_undef:
-; AVX512CD:       ## BB#0:
-; AVX512CD-NEXT:    vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT:    retq
-  %res = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %a, i1 -1)
-  ret <8 x i64> %res
+define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
+; ALL-LABEL: testv8i64:
+; ALL:       ## BB#0:
+; ALL-NEXT:    vplzcntq %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 0)
+  ret <8 x i64> %out
 }
 
-declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1) nounwind readonly
-declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1) nounwind readonly
+define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
+; ALL-LABEL: testv8i64u:
+; ALL:       ## BB#0:
+; ALL-NEXT:    vplzcntq %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 -1)
+  ret <8 x i64> %out
+}
+
+define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
+; ALL-LABEL: testv16i32:
+; ALL:       ## BB#0:
+; ALL-NEXT:    vplzcntd %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %out = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %in, i1 0)
+  ret <16 x i32> %out
+}
+
+define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
+; ALL-LABEL: testv16i32u:
+; ALL:       ## BB#0:
+; ALL-NEXT:    vplzcntd %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %out = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %in, i1 -1)
+  ret <16 x i32> %out
+}
+
+define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
+; ALL-LABEL: testv32i16:
+; ALL:       ## BB#0:
+; ALL-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; ALL-NEXT:    vpextrw $1, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vmovd %xmm2, %ecx
+; ALL-NEXT:    lzcntw %cx, %cx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $2, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $3, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $4, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $5, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $6, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $7, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
+; ALL-NEXT:    vpextrw $1, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vmovd %xmm0, %ecx
+; ALL-NEXT:    lzcntw %cx, %cx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $2, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $3, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $4, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $5, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $6, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $7, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm0
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; ALL-NEXT:    vpextrw $1, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vmovd %xmm2, %ecx
+; ALL-NEXT:    lzcntw %cx, %cx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $2, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $3, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $4, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $5, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $6, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $7, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
+; ALL-NEXT:    vpextrw $1, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vmovd %xmm1, %ecx
+; ALL-NEXT:    lzcntw %cx, %cx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $2, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $3, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $4, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $5, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $6, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $7, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm1
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    retq
+  %out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 0)
+  ret <32 x i16> %out
+}
+
+define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
+; ALL-LABEL: testv32i16u:
+; ALL:       ## BB#0:
+; ALL-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; ALL-NEXT:    vpextrw $1, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vmovd %xmm2, %ecx
+; ALL-NEXT:    lzcntw %cx, %cx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $2, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $3, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $4, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $5, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $6, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $7, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
+; ALL-NEXT:    vpextrw $1, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vmovd %xmm0, %ecx
+; ALL-NEXT:    lzcntw %cx, %cx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $2, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $3, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $4, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $5, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $6, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $7, %xmm0, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm0
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; ALL-NEXT:    vpextrw $1, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vmovd %xmm2, %ecx
+; ALL-NEXT:    lzcntw %cx, %cx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $2, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $3, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $4, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $5, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $6, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $7, %xmm2, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
+; ALL-NEXT:    vpextrw $1, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vmovd %xmm1, %ecx
+; ALL-NEXT:    lzcntw %cx, %cx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $2, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $3, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $4, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $5, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $6, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrw $7, %xmm1, %eax
+; ALL-NEXT:    lzcntw %ax, %ax
+; ALL-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm1
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    retq
+  %out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 -1)
+  ret <32 x i16> %out
+}
+
+define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
+; ALL-LABEL: testv64i8:
+; ALL:       ## BB#0:
+; ALL-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; ALL-NEXT:    vpextrb $1, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpextrb $0, %xmm2, %ecx
+; ALL-NEXT:    lzcntl %ecx, %ecx
+; ALL-NEXT:    addl $-24, %ecx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $2, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $3, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $4, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $5, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $6, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $7, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $8, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $9, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $10, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $11, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $12, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $13, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $14, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $15, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm2
+; ALL-NEXT:    vpextrb $1, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpextrb $0, %xmm0, %ecx
+; ALL-NEXT:    lzcntl %ecx, %ecx
+; ALL-NEXT:    addl $-24, %ecx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $2, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $3, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $4, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $5, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $6, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $7, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $8, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $9, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $10, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $11, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $12, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $13, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $14, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $15, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm0
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; ALL-NEXT:    vpextrb $1, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpextrb $0, %xmm2, %ecx
+; ALL-NEXT:    lzcntl %ecx, %ecx
+; ALL-NEXT:    addl $-24, %ecx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $2, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $3, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $4, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $5, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $6, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $7, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $8, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $9, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $10, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $11, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $12, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $13, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $14, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $15, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm2
+; ALL-NEXT:    vpextrb $1, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpextrb $0, %xmm1, %ecx
+; ALL-NEXT:    lzcntl %ecx, %ecx
+; ALL-NEXT:    addl $-24, %ecx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $2, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $3, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $4, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $5, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $6, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $7, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $8, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $9, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $10, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $11, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $12, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $13, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $14, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $15, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm1
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    retq
+  %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
+  ret <64 x i8> %out
+}
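(The lzcntl/addl $-24 pairs above are the same trick at byte width: each i8 lane is zero-extended into a 32-bit register, so lzcnt over-counts by 24 leading zeros, which the add then removes. A minimal sketch of the per-lane math, again an illustration rather than code from the patch, assuming _lzcnt_u32 from <immintrin.h> and -mlzcnt:

  #include <immintrin.h>
  #include <stdint.h>

  /* lzcntl %eax, %eax ; addl $-24, %eax */
  static inline uint8_t ctlz8_lane(uint8_t v) {
      /* _lzcnt_u32(0) == 32, so a zero byte correctly yields 8. */
      return (uint8_t)(_lzcnt_u32(v) - 24);
  }
)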
+
+define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
+; ALL-LABEL: testv64i8u:
+; ALL:       ## BB#0:
+; ALL-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; ALL-NEXT:    vpextrb $1, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpextrb $0, %xmm2, %ecx
+; ALL-NEXT:    lzcntl %ecx, %ecx
+; ALL-NEXT:    addl $-24, %ecx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $2, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $3, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $4, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $5, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $6, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $7, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $8, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $9, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $10, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $11, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $12, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $13, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $14, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $15, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm2
+; ALL-NEXT:    vpextrb $1, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpextrb $0, %xmm0, %ecx
+; ALL-NEXT:    lzcntl %ecx, %ecx
+; ALL-NEXT:    addl $-24, %ecx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $2, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $3, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $4, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $5, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $6, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $7, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $8, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $9, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $10, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $11, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $12, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $13, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $14, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $15, %xmm0, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm0
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; ALL-NEXT:    vpextrb $1, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpextrb $0, %xmm2, %ecx
+; ALL-NEXT:    lzcntl %ecx, %ecx
+; ALL-NEXT:    addl $-24, %ecx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $2, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $3, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $4, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $5, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $6, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $7, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $8, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $9, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $10, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $11, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $12, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $13, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $14, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $15, %xmm2, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm2
+; ALL-NEXT:    vpextrb $1, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpextrb $0, %xmm1, %ecx
+; ALL-NEXT:    lzcntl %ecx, %ecx
+; ALL-NEXT:    addl $-24, %ecx
+; ALL-NEXT:    vmovd %ecx, %xmm3
+; ALL-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $2, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $2, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $3, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $3, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $4, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $5, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $6, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $7, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $8, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $9, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $10, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $11, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $12, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $13, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $13, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $14, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $14, %eax, %xmm3, %xmm3
+; ALL-NEXT:    vpextrb $15, %xmm1, %eax
+; ALL-NEXT:    lzcntl %eax, %eax
+; ALL-NEXT:    addl $-24, %eax
+; ALL-NEXT:    vpinsrb $15, %eax, %xmm3, %xmm1
+; ALL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; ALL-NEXT:    retq
+  %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)
+  ret <64 x i8> %out
+}
+
+declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1)
+declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)
+declare <32 x i16> @llvm.ctlz.v32i16(<32 x i16>, i1)
+declare <64 x i8> @llvm.ctlz.v64i8(<64 x i8>, i1)
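(For reference, the trailing i1 operand of llvm.ctlz is the is_zero_undef flag: i1 0 requires a defined result for a zero input, while i1 -1, used by the *u test variants above, lets the backend assume the input is non-zero. A rough C analogy, not taken from this commit:

  #include <immintrin.h>

  unsigned defined_at_zero(unsigned x)  { return _lzcnt_u32(x); }            /* like i1 0  */
  unsigned assumes_nonzero(unsigned x)  { return (unsigned)__builtin_clz(x); } /* like i1 -1: undefined for x == 0 */
)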