[llvm] r340393 - [X86][SSE] Add sdiv test case from PR38658
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 22 02:47:12 PDT 2018
Author: rksimon
Date: Wed Aug 22 02:47:12 2018
New Revision: 340393
URL: http://llvm.org/viewvc/llvm-project?rev=340393&view=rev
Log:
[X86][SSE] Add sdiv test case from PR38658
Modified:
llvm/trunk/test/CodeGen/X86/combine-sdiv.ll
Modified: llvm/trunk/test/CodeGen/X86/combine-sdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sdiv.ll?rev=340393&r1=340392&r2=340393&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sdiv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sdiv.ll Wed Aug 22 02:47:12 2018
@@ -3309,3 +3309,205 @@ define <8 x i16> @combine_vec_sdiv_nonun
%1 = sdiv <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 1, i16 1, i16 1, i16 1>
ret <8 x i16> %1
}
+
+define <16 x i8> @pr38658(<16 x i8> %x) {
+; SSE2-LABEL: pr38658:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,147]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psraw $8, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: pmullw %xmm0, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pmullw %xmm3, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: psllw $8, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: packuswb %xmm1, %xmm2
+; SSE2-NEXT: psrlw $7, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: paddb %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: pr38658:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8,8,8,8,8]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psllw $12, %xmm2
+; SSE41-NEXT: psllw $4, %xmm0
+; SSE41-NEXT: por %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: paddw %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovsxbw %xmm3, %xmm3
+; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: psrlw $8, %xmm4
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: psrlw $4, %xmm4
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: psrlw $2, %xmm4
+; SSE41-NEXT: paddw %xmm2, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: psrlw $1, %xmm4
+; SSE41-NEXT: paddw %xmm2, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: packuswb %xmm3, %xmm0
+; SSE41-NEXT: paddb %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE41-NEXT: psraw $8, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psllw $6, %xmm2
+; SSE41-NEXT: psllw $8, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE41-NEXT: psraw $8, %xmm2
+; SSE41-NEXT: psllw $8, %xmm2
+; SSE41-NEXT: psrlw $8, %xmm2
+; SSE41-NEXT: packuswb %xmm1, %xmm2
+; SSE41-NEXT: psrlw $7, %xmm0
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: paddb %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: pr38658:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8]
+; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
+; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
+; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm4
+; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm3
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm3
+; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm3
+; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $6, %xmm1, %xmm2
+; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: pr38658:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm1
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm1
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: pr38658:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm1
+; AVX512F-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512F-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT: vpsrlw $7, %xmm0, %xmm1
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: pr38658:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm1
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovwb %ymm1, %xmm1
+; AVX512BW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vpsrlw $7, %xmm0, %xmm1
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512BW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; XOP-LABEL: pr38658:
+; XOP: # %bb.0:
+; XOP-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; XOP-NEXT: vpmovsxbw %xmm1, %xmm1
+; XOP-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpperm {{.*#+}} xmm1 = xmm2[1,3,5,7,9,11,13,15],xmm1[1,3,5,7,9,11,13,15]
+; XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm1
+; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; XOP-NEXT: retq
+ %1 = sdiv <16 x i8> %x, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 7>
+ ret <16 x i8> %1
+}
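
A note on what the checked output is doing: only lane 15 of the divisor vector is interesting (the other fifteen lanes divide by 1 and pass through, which is why the pmullw constant is zero everywhere except the last byte). The value 147 (0x93) in that constant is -109 interpreted as a signed byte, the standard 8-bit "magic" multiplier for signed division by 7: multiply, keep the high byte, add the dividend back, arithmetic-shift right by 2, then add the sign bit to round toward zero. Below is a minimal scalar C sketch of that sequence, for illustration only; the helper name sdiv7 and the test harness are not part of the commit, and the sketch assumes >> on a negative value is an arithmetic shift (true on all mainstream compilers).

#include <stdint.h>
#include <stdio.h>

/* Scalar model of the lane-15 divide-by-7 sequence in the SSE2 output
 * above.  147 (0x93) in the pmullw constant is -109, the 8-bit signed
 * magic multiplier for 7. */
static int8_t sdiv7(int8_t x) {
    int8_t t = (int8_t)(((int16_t)x * -109) >> 8); /* pmullw; psrlw $8 keeps the high byte */
    t += x;                                        /* paddb */
    int8_t q = (int8_t)(t >> 2);                   /* arithmetic shift by 2 (the psraw dance) */
    q += (uint8_t)t >> 7;                          /* psrlw $7 + pand: +1 when t is negative */
    return q;                                      /* final paddb */
}

int main(void) {
    for (int x = -128; x <= 127; ++x)
        if (sdiv7((int8_t)x) != (int8_t)x / 7) {
            printf("mismatch at %d\n", x);
            return 1;
        }
    puts("all 256 inputs match x / 7");
    return 0;
}

Compiling and running this (e.g. cc sdiv7.c && ./a.out) exhaustively checks all 256 inputs against C's truncating division, which matches LLVM sdiv semantics. The length of the vector sequences above, with the blend-heavy variable-shift expansions on SSE41/AVX1, is presumably the codegen quality issue that PR38658 tracks and that this test is meant to pin down before any improvement lands.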