[llvm] r314272 - [X86] Add avx512bw command lines to the 256-bit vector idiv tests.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 26 22:17:15 PDT 2017
Author: ctopper
Date: Tue Sep 26 22:17:15 2017
New Revision: 314272
URL: http://llvm.org/viewvc/llvm-project?rev=314272&view=rev
Log:
[X86] Add avx512bw command lines to the 256-bit vector idiv tests.
With avx512bw, some of the operations are now sign extended to 512 bits.
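
The interesting difference is in the tail of the expansion: without avx512bw
the <32 x i8> multiply by 7 is split into two 256-bit halves with cross-lane
shuffles, while with avx512bw the whole vector is sign extended into a single
zmm register, multiplied once, and truncated back. A sketch of the new
sequence, taken from the AVX512BW CHECK lines below (comments added for
illustration):

  vpmovsxbw %ymm1, %zmm1              # sign extend 32 x i8 to 32 x i16
  vpmullw {{.*}}(%rip), %zmm1, %zmm1  # one 512-bit multiply by the splat of 7
  vpmovwb %zmm1, %ymm1                # truncate 32 x i16 back to 32 x i8
  vpsubb %ymm1, %ymm0, %ymm0          # a - (a/7)*7 gives the remainder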
Modified:
llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll
llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll?rev=314272&r1=314271&r2=314272&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll Tue Sep 26 22:17:15 2017
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2 --check-prefix=AVX2NOBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX2 --check-prefix=AVX512BW
;
; sdiv by 7
@@ -497,49 +498,80 @@ define <32 x i8> @test_rem7_32i8(<32 x i
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: test_rem7_32i8:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
-; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm3
-; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
-; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsubb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsrlw $7, %ymm1, %ymm1
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX2-NEXT: vpmullw %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm3
-; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; AVX2NOBW-LABEL: test_rem7_32i8:
+; AVX2NOBW: # BB#0:
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147]
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2NOBW-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm3
+; AVX2NOBW-NEXT: vpmullw %ymm1, %ymm3, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpaddb %ymm0, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm2
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX2NOBW-NEXT: vpxor %ymm3, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpsubb %ymm3, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpsrlw $7, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm3, %xmm3
+; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: test_rem7_32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147]
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX512BW-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX512BW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm3
+; AVX512BW-NEXT: vpmullw %ymm1, %ymm3, %ymm1
+; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512BW-NEXT: vpaddb %ymm0, %ymm1, %ymm1
+; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm2
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512BW-NEXT: vpxor %ymm3, %ymm2, %ymm2
+; AVX512BW-NEXT: vpsubb %ymm3, %ymm2, %ymm2
+; AVX512BW-NEXT: vpsrlw $7, %ymm1, %ymm1
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: retq
%res = srem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
ret <32 x i8> %res
}
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll?rev=314272&r1=314271&r2=314272&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll Tue Sep 26 22:17:15 2017
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2 --check-prefix=AVX2NOBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX2 --check-prefix=AVX512BW
;
; udiv by 7
@@ -506,46 +507,74 @@ define <32 x i8> @test_rem7_32i8(<32 x i
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: test_rem7_32i8:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm1
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX2-NEXT: vpmullw %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm3
-; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; AVX2NOBW-LABEL: test_rem7_32i8:
+; AVX2NOBW: # BB#0:
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2NOBW-NEXT: vpmullw %ymm1, %ymm3, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm2
+; AVX2NOBW-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm3, %xmm3
+; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: test_rem7_32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT: vpmullw %ymm1, %ymm3, %ymm1
+; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm2
+; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: retq
%res = urem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
ret <32 x i8> %res
}
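
For reference, the new coverage can be reproduced locally with the same
invocation as the added RUN lines, e.g.:

  llc < test/CodeGen/X86/vector-idiv-sdiv-256.ll -mtriple=x86_64-unknown-unknown -mattr=+avx512bw \
    | FileCheck test/CodeGen/X86/vector-idiv-sdiv-256.ll --check-prefix=AVX2 --check-prefix=AVX512BW

and, per the NOTE at the top of each file, the assertions can be regenerated
with the update script (a sketch, assuming llc is on PATH and the paths are
relative to the llvm source root):

  utils/update_llc_test_checks.py test/CodeGen/X86/vector-idiv-sdiv-256.ll \
      test/CodeGen/X86/vector-idiv-udiv-256.ll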