[llvm] r266860 - [AVX512] Add avx512cd+vl runs to vector-tzcnt-128/256 tests to show using the vplzcntd/q instructions.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 19 22:19:01 PDT 2016
Author: ctopper
Date: Wed Apr 20 00:19:01 2016
New Revision: 266860
URL: http://llvm.org/viewvc/llvm-project?rev=266860&view=rev
Log:
[AVX512] Add avx512cd+vl runs to vector-tzcnt-128/256 tests to show using the vplzcntd/q instructions.
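For reference, the new vplzcnt-based lowering only fires on the zero-undef variants (the *u tests, which call llvm.cttz.* with i1 -1): the input is reduced to its lowest set bit with x & -x, vplzcntd/q counts the leading zeros of that single bit, and the result is subtracted from the element width minus one (31 or 63) to recover the trailing-zero count. The zero-defined variants keep the pshufb nibble-LUT popcount sequence. A minimal standalone sketch of such an input (not part of the committed tests; function name is illustrative) would look like:

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512cd,+avx512vl

    declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)

    ; Passing i1 true marks a zero input as undef, which is what permits the
    ; lowering shown in the diff below: t = x & -x, l = vplzcntq t, result = 63 - l.
    define <2 x i64> @cttz_zero_undef(<2 x i64> %x) nounwind {
      %r = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %x, i1 true)
      ret <2 x i64> %r
    }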
Modified:
llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll?rev=266860&r1=266859&r2=266860&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll Wed Apr 20 00:19:01 2016
@@ -5,6 +5,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512cd,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512CDVL
;
; Just one 32-bit run to make sure we do reasonable things for i64 tzcnt.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE41
@@ -166,16 +167,37 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: retq
;
-; AVX-LABEL: testv2i64u:
-; AVX: # BB#0:
-; AVX-NEXT: vpextrq $1, %xmm0, %rax
-; AVX-NEXT: bsfq %rax, %rax
-; AVX-NEXT: vmovq %rax, %xmm1
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: bsfq %rax, %rax
-; AVX-NEXT: vmovq %rax, %xmm0
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-NEXT: retq
+; AVX1-LABEL: testv2i64u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: bsfq %rax, %rax
+; AVX1-NEXT: vmovq %rax, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv2i64u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: bsfq %rax, %rax
+; AVX2-NEXT: vmovq %rax, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv2i64u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpsubq %xmm0, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vplzcntq %xmm0, %xmm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm1 = [63,63]
+; AVX512CDVL-NEXT: vpsubq %xmm0, %xmm1, %xmm0
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: testv2i64u:
; X32-SSE: # BB#0:
@@ -353,6 +375,27 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; AVX512CDVL-LABEL: testv4i32:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpsubd %xmm0, %xmm1, %xmm2
+; AVX512CDVL-NEXT: vpandd %xmm2, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpsubd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %xmm2, %xmm0, %xmm3
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %xmm3, %xmm4, %xmm3
+; AVX512CDVL-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpandq %xmm2, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpshufb %xmm0, %xmm4, %xmm0
+; AVX512CDVL-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512CDVL-NEXT: vpsadbw %xmm1, %xmm2, %xmm2
+; AVX512CDVL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512CDVL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512CDVL-NEXT: retq
+;
; X32-SSE-LABEL: testv4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
@@ -533,6 +576,16 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; AVX512CDVL-LABEL: testv4i32u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpsubd %xmm0, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpandd %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vplzcntd %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX512CDVL-NEXT: vpsubd %xmm0, %xmm1, %xmm0
+; AVX512CDVL-NEXT: retq
+;
; X32-SSE-LABEL: testv4i32u:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
@@ -658,24 +711,62 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: testv8i16:
-; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpsubw %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsllw $8, %xmm0, %xmm1
-; AVX-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: testv8i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv8i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsubw %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv8i16:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpsubw %xmm0, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm2
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512CDVL-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512CDVL-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX512CDVL-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512CDVL-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: testv8i16:
; X32-SSE: # BB#0:
@@ -799,24 +890,62 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: testv8i16u:
-; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpsubw %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsllw $8, %xmm0, %xmm1
-; AVX-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: testv8i16u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv8i16u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsubw %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv8i16u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpsubw %xmm0, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm2
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512CDVL-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512CDVL-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpsllw $8, %xmm0, %xmm1
+; AVX512CDVL-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512CDVL-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: testv8i16u:
; X32-SSE: # BB#0:
@@ -924,21 +1053,53 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; SSE41-NEXT: paddb %xmm4, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: testv16i8:
-; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpsubb %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: testv16i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv16i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv16i8:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm2
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512CDVL-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512CDVL-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: testv16i8:
; X32-SSE: # BB#0:
@@ -1042,21 +1203,53 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; SSE41-NEXT: paddb %xmm4, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: testv16i8u:
-; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpsubb %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: testv16i8u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: testv16i8u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv16i8u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpsubb %xmm0, %xmm1, %xmm1
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm2
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %xmm2, %xmm3, %xmm2
+; AVX512CDVL-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpandq %xmm1, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpshufb %xmm0, %xmm3, %xmm0
+; AVX512CDVL-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: testv16i8u:
; X32-SSE: # BB#0:
@@ -1129,10 +1322,20 @@ define <4 x i32> @foldv4i32() nounwind {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv4i32:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
-; AVX-NEXT: retq
+; AVX1-LABEL: foldv4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv4i32:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa32 {{.*#+}} xmm0 = [8,0,32,0]
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32:
; X32-SSE: # BB#0:
@@ -1148,10 +1351,20 @@ define <4 x i32> @foldv4i32u() nounwind
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,32,0]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv4i32u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
-; AVX-NEXT: retq
+; AVX1-LABEL: foldv4i32u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv4i32u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,32,0]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv4i32u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa32 {{.*#+}} xmm0 = [8,0,32,0]
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: foldv4i32u:
; X32-SSE: # BB#0:
@@ -1167,10 +1380,20 @@ define <8 x i16> @foldv8i16() nounwind {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv8i16:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
-; AVX-NEXT: retq
+; AVX1-LABEL: foldv8i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv8i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv8i16:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16:
; X32-SSE: # BB#0:
@@ -1186,10 +1409,20 @@ define <8 x i16> @foldv8i16u() nounwind
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv8i16u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
-; AVX-NEXT: retq
+; AVX1-LABEL: foldv8i16u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv8i16u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv8i16u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm0 = [8,0,16,0,16,0,3,3]
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: foldv8i16u:
; X32-SSE: # BB#0:
@@ -1205,10 +1438,20 @@ define <16 x i8> @foldv16i8() nounwind {
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv16i8:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
-; AVX-NEXT: retq
+; AVX1-LABEL: foldv16i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv16i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv16i8:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8:
; X32-SSE: # BB#0:
@@ -1224,10 +1467,20 @@ define <16 x i8> @foldv16i8u() nounwind
; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
; SSE-NEXT: retq
;
-; AVX-LABEL: foldv16i8u:
-; AVX: # BB#0:
-; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
-; AVX-NEXT: retq
+; AVX1-LABEL: foldv16i8u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv16i8u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv16i8u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} xmm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5]
+; AVX512CDVL-NEXT: retq
;
; X32-SSE-LABEL: foldv16i8u:
; X32-SSE: # BB#0:
Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll?rev=266860&r1=266859&r2=266860&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll Wed Apr 20 00:19:01 2016
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512cd,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512CDVL
define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
@@ -51,6 +52,23 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX2-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv4i64:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpsubq %ymm0, %ymm1, %ymm2
+; AVX512CDVL-NEXT: vpandq %ymm2, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpsubq {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %ymm2, %ymm0, %ymm3
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512CDVL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpandq %ymm2, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512CDVL-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: retq
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %in, i1 0)
ret <4 x i64> %out
}
@@ -104,6 +122,16 @@ define <4 x i64> @testv4i64u(<4 x i64> %
; AVX2-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv4i64u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpsubq %ymm0, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vplzcntq %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX512CDVL-NEXT: vpsubq %ymm0, %ymm1, %ymm0
+; AVX512CDVL-NEXT: retq
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %in, i1 -1)
ret <4 x i64> %out
}
@@ -169,6 +197,27 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv8i32:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpsubd %ymm0, %ymm1, %ymm2
+; AVX512CDVL-NEXT: vpandd %ymm2, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpsubd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %ymm2, %ymm0, %ymm3
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512CDVL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpandq %ymm2, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512CDVL-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; AVX512CDVL-NEXT: vpsadbw %ymm1, %ymm2, %ymm2
+; AVX512CDVL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; AVX512CDVL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512CDVL-NEXT: retq
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %in, i1 0)
ret <8 x i32> %out
}
@@ -234,6 +283,16 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv8i32u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpsubd %ymm0, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpandd %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vplzcntd %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX512CDVL-NEXT: vpsubd %ymm0, %ymm1, %ymm0
+; AVX512CDVL-NEXT: retq
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %in, i1 -1)
ret <8 x i32> %out
}
@@ -292,6 +351,25 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv16i16:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpsubw %ymm0, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm2
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512CDVL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512CDVL-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpsllw $8, %ymm0, %ymm1
+; AVX512CDVL-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512CDVL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512CDVL-NEXT: retq
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %in, i1 0)
ret <16 x i16> %out
}
@@ -350,6 +428,25 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv16i16u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpsubw %ymm0, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm2
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512CDVL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512CDVL-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpsllw $8, %ymm0, %ymm1
+; AVX512CDVL-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512CDVL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512CDVL-NEXT: retq
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %in, i1 -1)
ret <16 x i16> %out
}
@@ -399,6 +496,22 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv32i8:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpsubb %ymm0, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm2
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512CDVL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512CDVL-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512CDVL-NEXT: retq
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 0)
ret <32 x i8> %out
}
@@ -448,78 +561,174 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: testv32i8u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpsubb %ymm0, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm2
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512CDVL-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; AVX512CDVL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpandq %ymm1, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; AVX512CDVL-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512CDVL-NEXT: retq
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 -1)
ret <32 x i8> %out
}
define <4 x i64> @foldv4i64() nounwind {
-; ALL-LABEL: foldv4i64:
-; ALL: # BB#0:
-; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
-; ALL-NEXT: retq
+; AVX1-LABEL: foldv4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv4i64:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm0 = [8,0,64,0]
+; AVX512CDVL-NEXT: retq
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
}
define <4 x i64> @foldv4i64u() nounwind {
-; ALL-LABEL: foldv4i64u:
-; ALL: # BB#0:
-; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
-; ALL-NEXT: retq
+; AVX1-LABEL: foldv4i64u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv4i64u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv4i64u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm0 = [8,0,64,0]
+; AVX512CDVL-NEXT: retq
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out
}
define <8 x i32> @foldv8i32() nounwind {
-; ALL-LABEL: foldv8i32:
-; ALL: # BB#0:
-; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
-; ALL-NEXT: retq
+; AVX1-LABEL: foldv8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv8i32:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa32 {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX512CDVL-NEXT: retq
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
ret <8 x i32> %out
}
define <8 x i32> @foldv8i32u() nounwind {
-; ALL-LABEL: foldv8i32u:
-; ALL: # BB#0:
-; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
-; ALL-NEXT: retq
+; AVX1-LABEL: foldv8i32u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv8i32u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv8i32u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa32 {{.*#+}} ymm0 = [8,0,32,0,16,0,3,3]
+; AVX512CDVL-NEXT: retq
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
ret <8 x i32> %out
}
define <16 x i16> @foldv16i16() nounwind {
-; ALL-LABEL: foldv16i16:
-; ALL: # BB#0:
-; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
-; ALL-NEXT: retq
+; AVX1-LABEL: foldv16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv16i16:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX512CDVL-NEXT: retq
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
ret <16 x i16> %out
}
define <16 x i16> @foldv16i16u() nounwind {
-; ALL-LABEL: foldv16i16u:
-; ALL: # BB#0:
-; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
-; ALL-NEXT: retq
+; AVX1-LABEL: foldv16i16u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv16i16u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv16i16u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm0 = [8,0,16,0,16,0,3,3,1,1,0,1,2,3,4,5]
+; AVX512CDVL-NEXT: retq
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
ret <16 x i16> %out
}
define <32 x i8> @foldv32i8() nounwind {
-; ALL-LABEL: foldv32i8:
-; ALL: # BB#0:
-; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
-; ALL-NEXT: retq
+; AVX1-LABEL: foldv32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv32i8:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX512CDVL-NEXT: retq
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
ret <32 x i8> %out
}
define <32 x i8> @foldv32i8u() nounwind {
-; ALL-LABEL: foldv32i8u:
-; ALL: # BB#0:
-; ALL-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
-; ALL-NEXT: retq
+; AVX1-LABEL: foldv32i8u:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: foldv32i8u:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX2-NEXT: retq
+;
+; AVX512CDVL-LABEL: foldv32i8u:
+; AVX512CDVL: # BB#0:
+; AVX512CDVL-NEXT: vmovdqa64 {{.*#+}} ymm0 = [8,0,8,0,8,0,3,3,1,1,0,1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1,0,0,0,0,0]
+; AVX512CDVL-NEXT: retq
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
ret <32 x i8> %out
}