[llvm] r340264 - [X86] Add SSE2 sdiv combine tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 21 03:44:06 PDT 2018
Author: rksimon
Date: Tue Aug 21 03:44:06 2018
New Revision: 340264
URL: http://llvm.org/viewvc/llvm-project?rev=340264&view=rev
Log:
[X86] Add SSE2 sdiv combine tests
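
For context: this file follows the usual update_llc_test_checks.py pattern — small IR
functions performing vector sdiv by constants, with per-prefix CHECK lines regenerated
for each RUN configuration. A minimal sketch of the kind of function being covered
(illustrative only; the divisor vector shown is an assumption, since the IR bodies are
unchanged by this commit and elided from the diff):

define <4 x i32> @combine_vec_sdiv_by_pow2b_v4i32(<4 x i32> %x) {
  ; Divide by powers of two; element 0 divides by 1 and should pass through.
  %1 = sdiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %1
}

Adding the -mattr=+sse2 RUN line splits the previously shared SSE prefix into SSE2 and
SSE41 wherever codegen diverges: SSE4.1 instructions such as pextrd/pinsrd/pblendw/
pblendvb have no SSE2 counterparts, so the SSE2 paths fall back to pshufd/punpck* and
pand/pandn/por select sequences, as the regenerated checks below show.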
Modified:
llvm/trunk/test/CodeGen/X86/combine-sdiv.ll
Modified: llvm/trunk/test/CodeGen/X86/combine-sdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sdiv.ll?rev=340264&r1=340263&r2=340264&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sdiv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sdiv.ll Tue Aug 21 03:44:06 2018
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX512,AVX512BW
@@ -119,31 +120,62 @@ define i32 @combine_sdiv_zero(i32 %x) {
}
define <4 x i32> @combine_vec_sdiv_zero(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_sdiv_zero:
-; SSE: # %bb.0:
-; SSE-NEXT: pextrd $1, %xmm0, %ecx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: movl %eax, %ecx
-; SSE-NEXT: movd %xmm0, %esi
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %ecx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $2, %eax, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %ecx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_zero:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: idivl %ecx
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: idivl %ecx
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: idivl %ecx
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: idivl %ecx
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_zero:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pextrd $1, %xmm0, %ecx
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: idivl %ecx
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: movd %xmm0, %esi
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: idivl %esi
+; SSE41-NEXT: movd %eax, %xmm1
+; SSE41-NEXT: pinsrd $1, %ecx, %xmm1
+; SSE41-NEXT: pextrd $2, %xmm0, %ecx
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: idivl %ecx
+; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pextrd $3, %xmm0, %ecx
+; SSE41-NEXT: xorl %eax, %eax
+; SSE41-NEXT: xorl %edx, %edx
+; SSE41-NEXT: idivl %ecx
+; SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_zero:
; AVX: # %bb.0:
@@ -186,31 +218,62 @@ define i32 @combine_sdiv_dupe(i32 %x) {
}
define <4 x i32> @combine_vec_sdiv_dupe(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_sdiv_dupe:
-; SSE: # %bb.0:
-; SSE-NEXT: pextrd $1, %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: movl %eax, %ecx
-; SSE-NEXT: movd %xmm0, %esi
-; SSE-NEXT: movl %esi, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $2, %eax, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_dupe:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: movl %ecx, %eax
+; SSE2-NEXT: cltd
+; SSE2-NEXT: idivl %ecx
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: movl %ecx, %eax
+; SSE2-NEXT: cltd
+; SSE2-NEXT: idivl %ecx
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: movl %ecx, %eax
+; SSE2-NEXT: cltd
+; SSE2-NEXT: idivl %ecx
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: movl %ecx, %eax
+; SSE2-NEXT: cltd
+; SSE2-NEXT: idivl %ecx
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_dupe:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pextrd $1, %xmm0, %ecx
+; SSE41-NEXT: movl %ecx, %eax
+; SSE41-NEXT: cltd
+; SSE41-NEXT: idivl %ecx
+; SSE41-NEXT: movl %eax, %ecx
+; SSE41-NEXT: movd %xmm0, %esi
+; SSE41-NEXT: movl %esi, %eax
+; SSE41-NEXT: cltd
+; SSE41-NEXT: idivl %esi
+; SSE41-NEXT: movd %eax, %xmm1
+; SSE41-NEXT: pinsrd $1, %ecx, %xmm1
+; SSE41-NEXT: pextrd $2, %xmm0, %ecx
+; SSE41-NEXT: movl %ecx, %eax
+; SSE41-NEXT: cltd
+; SSE41-NEXT: idivl %ecx
+; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pextrd $3, %xmm0, %ecx
+; SSE41-NEXT: movl %ecx, %eax
+; SSE41-NEXT: cltd
+; SSE41-NEXT: idivl %ecx
+; SSE41-NEXT: pinsrd $3, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_dupe:
; AVX: # %bb.0:
@@ -259,19 +322,33 @@ define <4 x i32> @combine_vec_sdiv_by_po
}
define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pos1:
-; SSE: # %bb.0:
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $3, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psrld $4, %xmm0
-; SSE-NEXT: psrld $2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pos1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $4, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $3, %xmm2
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $2, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pos1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psrld $4, %xmm0
+; SSE41-NEXT: psrld $2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pos1:
; AVX1: # %bb.0:
@@ -349,36 +426,69 @@ define <4 x i32> @combine_vec_sdiv_by_po
}
define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pxor %xmm0, %xmm0
-; SSE-NEXT: pxor %xmm3, %xmm3
-; SSE-NEXT: pcmpgtb %xmm1, %xmm3
-; SSE-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [1,4,2,16,8,32,64,2]
-; SSE-NEXT: pmullw %xmm0, %xmm3
-; SSE-NEXT: psrlw $8, %xmm3
-; SSE-NEXT: pmullw %xmm0, %xmm2
-; SSE-NEXT: psrlw $8, %xmm2
-; SSE-NEXT: packuswb %xmm3, %xmm2
-; SSE-NEXT: paddb %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE-NEXT: psraw $8, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [256,64,128,16,32,8,4,128]
-; SSE-NEXT: pmullw %xmm3, %xmm0
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: psraw $8, %xmm2
-; SSE-NEXT: pmullw %xmm3, %xmm2
-; SSE-NEXT: psrlw $8, %xmm2
-; SSE-NEXT: packuswb %xmm0, %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
-; SSE-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,4,2,16,8,32,64,2]
+; SSE2-NEXT: pmullw %xmm4, %xmm3
+; SSE2-NEXT: psrlw $8, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: pmullw %xmm4, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: paddb %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE2-NEXT: psraw $8, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [256,64,128,16,32,8,4,128]
+; SSE2-NEXT: pmullw %xmm3, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: pmullw %xmm3, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: packuswb %xmm1, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [1,4,2,16,8,32,64,2]
+; SSE41-NEXT: pmullw %xmm0, %xmm3
+; SSE41-NEXT: psrlw $8, %xmm3
+; SSE41-NEXT: pmullw %xmm0, %xmm2
+; SSE41-NEXT: psrlw $8, %xmm2
+; SSE41-NEXT: packuswb %xmm3, %xmm2
+; SSE41-NEXT: paddb %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; SSE41-NEXT: psraw $8, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [256,64,128,16,32,8,4,128]
+; SSE41-NEXT: pmullw %xmm3, %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE41-NEXT: psraw $8, %xmm2
+; SSE41-NEXT: pmullw %xmm3, %xmm2
+; SSE41-NEXT: psrlw $8, %xmm2
+; SSE41-NEXT: packuswb %xmm0, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v16i8:
; AVX1: # %bb.0:
@@ -476,24 +586,55 @@ define <16 x i8> @combine_vec_sdiv_by_po
}
define <8 x i16> @combine_vec_sdiv_by_pow2b_v8i16(<8 x i16> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psraw $15, %xmm1
-; SSE-NEXT: pmulhuw {{.*}}(%rip), %xmm1
-; SSE-NEXT: paddw %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psraw $4, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psraw $2, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: psraw $1, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3],xmm1[4,5],xmm3[6],xmm1[7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psraw $15, %xmm1
+; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,0,65535,0,0,65535]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: psraw $4, %xmm1
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,0,65535,0,65535]
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: psraw $2, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,0,0,65535,0]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: psraw $1, %xmm1
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psraw $15, %xmm1
+; SSE41-NEXT: pmulhuw {{.*}}(%rip), %xmm1
+; SSE41-NEXT: paddw %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psraw $4, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7]
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $2, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
+; SSE41-NEXT: movdqa %xmm3, %xmm1
+; SSE41-NEXT: psraw $1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3],xmm1[4,5],xmm3[6],xmm1[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
; AVX1: # %bb.0:
@@ -556,40 +697,98 @@ define <8 x i16> @combine_vec_sdiv_by_po
}
define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psraw $15, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [1,4,2,16,8,32,64,2]
-; SSE-NEXT: pmulhuw %xmm3, %xmm2
-; SSE-NEXT: paddw %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: psraw $4, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2],xmm4[3],xmm2[4],xmm4[5,6],xmm2[7]
-; SSE-NEXT: movdqa %xmm4, %xmm5
-; SSE-NEXT: psraw $2, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
-; SSE-NEXT: movdqa %xmm5, %xmm2
-; SSE-NEXT: psraw $1, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3],xmm2[4,5],xmm5[6],xmm2[7]
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psraw $15, %xmm0
-; SSE-NEXT: pmulhuw %xmm3, %xmm0
-; SSE-NEXT: paddw %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psraw $4, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1,2],xmm3[3],xmm0[4],xmm3[5,6],xmm0[7]
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: psraw $2, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psraw $1, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6],xmm3[7]
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psraw $15, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [1,4,2,16,8,32,64,2]
+; SSE2-NEXT: pmulhuw %xmm8, %xmm0
+; SSE2-NEXT: paddw %xmm3, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,65535,0,0,65535]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: psraw $4, %xmm0
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: pandn %xmm0, %xmm6
+; SSE2-NEXT: por %xmm2, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,0,65535]
+; SSE2-NEXT: movdqa %xmm6, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: psraw $2, %xmm6
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: pandn %xmm6, %xmm2
+; SSE2-NEXT: por %xmm0, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,0,65535,0,0,65535,0]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: psraw $1, %xmm2
+; SSE2-NEXT: movdqa %xmm7, %xmm6
+; SSE2-NEXT: pandn %xmm2, %xmm6
+; SSE2-NEXT: por %xmm0, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm2, %xmm6
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pandn %xmm3, %xmm0
+; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psraw $15, %xmm3
+; SSE2-NEXT: pmulhuw %xmm8, %xmm3
+; SSE2-NEXT: paddw %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: psraw $4, %xmm3
+; SSE2-NEXT: pandn %xmm3, %xmm4
+; SSE2-NEXT: por %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: psraw $2, %xmm4
+; SSE2-NEXT: pandn %xmm4, %xmm5
+; SSE2-NEXT: por %xmm3, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm3
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: psraw $1, %xmm5
+; SSE2-NEXT: pandn %xmm5, %xmm7
+; SSE2-NEXT: por %xmm3, %xmm7
+; SSE2-NEXT: pand %xmm2, %xmm7
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psraw $15, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [1,4,2,16,8,32,64,2]
+; SSE41-NEXT: pmulhuw %xmm3, %xmm2
+; SSE41-NEXT: paddw %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: psraw $4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2],xmm4[3],xmm2[4],xmm4[5,6],xmm2[7]
+; SSE41-NEXT: movdqa %xmm4, %xmm5
+; SSE41-NEXT: psraw $2, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
+; SSE41-NEXT: movdqa %xmm5, %xmm2
+; SSE41-NEXT: psraw $1, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3],xmm2[4,5],xmm5[6],xmm2[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psraw $15, %xmm0
+; SSE41-NEXT: pmulhuw %xmm3, %xmm0
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psraw $4, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1,2],xmm3[3],xmm0[4],xmm3[5,6],xmm0[7]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: psraw $2, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psraw $1, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6],xmm3[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm1
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
; AVX1: # %bb.0:
@@ -680,69 +879,181 @@ define <16 x i16> @combine_vec_sdiv_by_p
}
define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psraw $15, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [1,4,2,16,8,32,64,2]
-; SSE-NEXT: pmulhuw %xmm5, %xmm0
-; SSE-NEXT: paddw %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: psraw $4, %xmm6
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3],xmm0[4],xmm6[5,6],xmm0[7]
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: psraw $2, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; SSE-NEXT: movdqa %xmm7, %xmm0
-; SSE-NEXT: psraw $1, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm7[0,1],xmm0[2],xmm7[3],xmm0[4,5],xmm7[6],xmm0[7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm4, %xmm1
-; SSE-NEXT: psraw $15, %xmm1
-; SSE-NEXT: pmulhuw %xmm5, %xmm1
-; SSE-NEXT: paddw %xmm4, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: psraw $4, %xmm6
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm1[0,1,2],xmm6[3],xmm1[4],xmm6[5,6],xmm1[7]
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: psraw $2, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: psraw $1, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1],xmm1[2],xmm7[3],xmm1[4,5],xmm7[6],xmm1[7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: psraw $15, %xmm4
-; SSE-NEXT: pmulhuw %xmm5, %xmm4
-; SSE-NEXT: paddw %xmm2, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm6
-; SSE-NEXT: psraw $4, %xmm6
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2],xmm6[3],xmm4[4],xmm6[5,6],xmm4[7]
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: psraw $2, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: psraw $1, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2],xmm7[3],xmm4[4,5],xmm7[6],xmm4[7]
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: psraw $15, %xmm2
-; SSE-NEXT: pmulhuw %xmm5, %xmm2
-; SSE-NEXT: paddw %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: psraw $4, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2],xmm5[3],xmm2[4],xmm5[5,6],xmm2[7]
-; SSE-NEXT: movdqa %xmm5, %xmm2
-; SSE-NEXT: psraw $2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3],xmm2[4],xmm5[5],xmm2[6],xmm5[7]
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: psraw $1, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1],xmm5[2],xmm2[3],xmm5[4,5],xmm2[6],xmm5[7]
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm5, %xmm3
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm8
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psraw $15, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [1,4,2,16,8,32,64,2]
+; SSE2-NEXT: pmulhuw %xmm9, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [65535,65535,65535,0,65535,0,0,65535]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm11, %xmm4
+; SSE2-NEXT: psraw $4, %xmm0
+; SSE2-NEXT: movdqa %xmm11, %xmm5
+; SSE2-NEXT: pandn %xmm0, %xmm5
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [65535,0,65535,65535,0,65535,0,65535]
+; SSE2-NEXT: movdqa %xmm5, %xmm0
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: psraw $2, %xmm5
+; SSE2-NEXT: movdqa %xmm7, %xmm4
+; SSE2-NEXT: pandn %xmm5, %xmm4
+; SSE2-NEXT: por %xmm0, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,0,65535,0,0,65535,0]
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm10, %xmm0
+; SSE2-NEXT: psraw $1, %xmm4
+; SSE2-NEXT: movdqa %xmm10, %xmm5
+; SSE2-NEXT: pandn %xmm4, %xmm5
+; SSE2-NEXT: por %xmm0, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [0,65535,65535,65535,65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm12, %xmm5
+; SSE2-NEXT: movdqa %xmm12, %xmm0
+; SSE2-NEXT: pandn %xmm1, %xmm0
+; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: movdqa %xmm8, %xmm1
+; SSE2-NEXT: psraw $15, %xmm1
+; SSE2-NEXT: pmulhuw %xmm9, %xmm1
+; SSE2-NEXT: paddw %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: pand %xmm11, %xmm5
+; SSE2-NEXT: psraw $4, %xmm1
+; SSE2-NEXT: movdqa %xmm11, %xmm6
+; SSE2-NEXT: pandn %xmm1, %xmm6
+; SSE2-NEXT: por %xmm5, %xmm6
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: psraw $2, %xmm6
+; SSE2-NEXT: movdqa %xmm7, %xmm5
+; SSE2-NEXT: pandn %xmm6, %xmm5
+; SSE2-NEXT: por %xmm1, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm10, %xmm1
+; SSE2-NEXT: psraw $1, %xmm5
+; SSE2-NEXT: movdqa %xmm10, %xmm6
+; SSE2-NEXT: pandn %xmm5, %xmm6
+; SSE2-NEXT: por %xmm1, %xmm6
+; SSE2-NEXT: pand %xmm12, %xmm6
+; SSE2-NEXT: movdqa %xmm12, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm6, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: psraw $15, %xmm5
+; SSE2-NEXT: pmulhuw %xmm9, %xmm5
+; SSE2-NEXT: paddw %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: pand %xmm11, %xmm6
+; SSE2-NEXT: psraw $4, %xmm5
+; SSE2-NEXT: movdqa %xmm11, %xmm4
+; SSE2-NEXT: pandn %xmm5, %xmm4
+; SSE2-NEXT: por %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: psraw $2, %xmm4
+; SSE2-NEXT: movdqa %xmm7, %xmm6
+; SSE2-NEXT: pandn %xmm4, %xmm6
+; SSE2-NEXT: por %xmm5, %xmm6
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm10, %xmm4
+; SSE2-NEXT: psraw $1, %xmm6
+; SSE2-NEXT: movdqa %xmm10, %xmm5
+; SSE2-NEXT: pandn %xmm6, %xmm5
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm12, %xmm5
+; SSE2-NEXT: movdqa %xmm12, %xmm8
+; SSE2-NEXT: pandn %xmm2, %xmm8
+; SSE2-NEXT: por %xmm5, %xmm8
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: pmulhuw %xmm9, %xmm2
+; SSE2-NEXT: paddw %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm11, %xmm4
+; SSE2-NEXT: psraw $4, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm11
+; SSE2-NEXT: por %xmm4, %xmm11
+; SSE2-NEXT: movdqa %xmm11, %xmm2
+; SSE2-NEXT: pand %xmm7, %xmm2
+; SSE2-NEXT: psraw $2, %xmm11
+; SSE2-NEXT: pandn %xmm11, %xmm7
+; SSE2-NEXT: por %xmm2, %xmm7
+; SSE2-NEXT: movdqa %xmm7, %xmm2
+; SSE2-NEXT: pand %xmm10, %xmm2
+; SSE2-NEXT: psraw $1, %xmm7
+; SSE2-NEXT: pandn %xmm7, %xmm10
+; SSE2-NEXT: por %xmm2, %xmm10
+; SSE2-NEXT: pand %xmm12, %xmm10
+; SSE2-NEXT: pandn %xmm3, %xmm12
+; SSE2-NEXT: por %xmm10, %xmm12
+; SSE2-NEXT: movdqa %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm12, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm1, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psraw $15, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [1,4,2,16,8,32,64,2]
+; SSE41-NEXT: pmulhuw %xmm5, %xmm0
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm6
+; SSE41-NEXT: psraw $4, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3],xmm0[4],xmm6[5,6],xmm0[7]
+; SSE41-NEXT: movdqa %xmm6, %xmm7
+; SSE41-NEXT: psraw $2, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
+; SSE41-NEXT: movdqa %xmm7, %xmm0
+; SSE41-NEXT: psraw $1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm7[0,1],xmm0[2],xmm7[3],xmm0[4,5],xmm7[6],xmm0[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm4, %xmm1
+; SSE41-NEXT: psraw $15, %xmm1
+; SSE41-NEXT: pmulhuw %xmm5, %xmm1
+; SSE41-NEXT: paddw %xmm4, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm6
+; SSE41-NEXT: psraw $4, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm1[0,1,2],xmm6[3],xmm1[4],xmm6[5,6],xmm1[7]
+; SSE41-NEXT: movdqa %xmm6, %xmm7
+; SSE41-NEXT: psraw $2, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
+; SSE41-NEXT: movdqa %xmm7, %xmm1
+; SSE41-NEXT: psraw $1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1],xmm1[2],xmm7[3],xmm1[4,5],xmm7[6],xmm1[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm2, %xmm4
+; SSE41-NEXT: psraw $15, %xmm4
+; SSE41-NEXT: pmulhuw %xmm5, %xmm4
+; SSE41-NEXT: paddw %xmm2, %xmm4
+; SSE41-NEXT: movdqa %xmm4, %xmm6
+; SSE41-NEXT: psraw $4, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2],xmm6[3],xmm4[4],xmm6[5,6],xmm4[7]
+; SSE41-NEXT: movdqa %xmm6, %xmm7
+; SSE41-NEXT: psraw $2, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
+; SSE41-NEXT: movdqa %xmm7, %xmm4
+; SSE41-NEXT: psraw $1, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2],xmm7[3],xmm4[4,5],xmm7[6],xmm4[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm3, %xmm2
+; SSE41-NEXT: psraw $15, %xmm2
+; SSE41-NEXT: pmulhuw %xmm5, %xmm2
+; SSE41-NEXT: paddw %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm5
+; SSE41-NEXT: psraw $4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2],xmm5[3],xmm2[4],xmm5[5,6],xmm2[7]
+; SSE41-NEXT: movdqa %xmm5, %xmm2
+; SSE41-NEXT: psraw $2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3],xmm2[4],xmm5[5],xmm2[6],xmm5[7]
+; SSE41-NEXT: movdqa %xmm2, %xmm5
+; SSE41-NEXT: psraw $1, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1],xmm5[2],xmm2[3],xmm5[4,5],xmm2[6],xmm5[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm4, %xmm2
+; SSE41-NEXT: movdqa %xmm5, %xmm3
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
; AVX1: # %bb.0:
@@ -895,30 +1206,56 @@ define <32 x i16> @combine_vec_sdiv_by_p
}
define <4 x i32> @combine_vec_sdiv_by_pow2b_v4i32(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v4i32:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $31, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrld $28, %xmm2
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psrld $30, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: psrld $29, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; SSE-NEXT: paddd %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psrad $3, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: psrad $4, %xmm2
-; SSE-NEXT: psrad $2, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3,4,5,6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrld $28, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrld $29, %xmm3
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; SSE2-NEXT: psrld $30, %xmm1
+; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrad $4, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrad $3, %xmm3
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrad $2, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v4i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrad $31, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrld $28, %xmm2
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psrld $30, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: psrld $29, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; SSE41-NEXT: paddd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psrad $3, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT: psrad $4, %xmm2
+; SSE41-NEXT: psrad $2, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3,4,5,6,7]
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v4i32:
; AVX1: # %bb.0:
@@ -962,52 +1299,99 @@ define <4 x i32> @combine_vec_sdiv_by_po
}
define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v8i32:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: psrld $28, %xmm0
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrld $30, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: psrld $29, %xmm3
-; SSE-NEXT: pxor %xmm5, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
-; SSE-NEXT: paddd %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: psrad $3, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: psrad $4, %xmm3
-; SSE-NEXT: psrad $2, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: psrld $28, %xmm2
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrld $30, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: psrld $29, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
-; SSE-NEXT: paddd %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: psrad $3, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: psrad $4, %xmm3
-; SSE-NEXT: psrad $2, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrld $28, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrld $29, %xmm4
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm3[1]
+; SSE2-NEXT: psrld $30, %xmm0
+; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm4[0,3]
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrad $4, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrad $3, %xmm4
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm3[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrad $2, %xmm3
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm4[0,3]
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: psrld $28, %xmm3
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrld $29, %xmm4
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm3[1]
+; SSE2-NEXT: psrld $30, %xmm2
+; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm4[0,3]
+; SSE2-NEXT: paddd %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: psrad $4, %xmm3
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrad $3, %xmm4
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm3[1]
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: psrad $2, %xmm3
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm4[0,3]
+; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
+; SSE2-NEXT: movaps %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v8i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrad $31, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: psrld $28, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: psrld $30, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: psrld $29, %xmm3
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; SSE41-NEXT: paddd %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: psrad $3, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: psrad $4, %xmm3
+; SSE41-NEXT: psrad $2, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psrad $31, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm2
+; SSE41-NEXT: psrld $28, %xmm2
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: psrld $30, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: psrld $29, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; SSE41-NEXT: paddd %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: movdqa %xmm3, %xmm2
+; SSE41-NEXT: psrad $3, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: psrad $4, %xmm3
+; SSE41-NEXT: psrad $2, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i32:
; AVX1: # %bb.0:
@@ -1075,94 +1459,185 @@ define <8 x i32> @combine_vec_sdiv_by_po
}
define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: psrad $31, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: psrld $28, %xmm0
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: psrld $30, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: psrld $29, %xmm6
-; SSE-NEXT: pxor %xmm5, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
-; SSE-NEXT: paddd %xmm1, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: psrad $3, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm6[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: psrad $4, %xmm6
-; SSE-NEXT: psrad $2, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3],xmm0[4,5],xmm7[6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm4, %xmm6
-; SSE-NEXT: psrad $31, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm1
-; SSE-NEXT: psrld $28, %xmm1
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: psrld $30, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psrld $29, %xmm6
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
-; SSE-NEXT: paddd %xmm4, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: movdqa %xmm6, %xmm1
-; SSE-NEXT: psrad $3, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm6[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psrad $4, %xmm6
-; SSE-NEXT: psrad $2, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3],xmm1[4,5],xmm7[6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm2, %xmm6
-; SSE-NEXT: psrad $31, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: psrld $28, %xmm4
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: psrld $30, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm4[4,5,6,7]
-; SSE-NEXT: psrld $29, %xmm6
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
-; SSE-NEXT: paddd %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm7
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: psrad $3, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
-; SSE-NEXT: psrad $4, %xmm6
-; SSE-NEXT: psrad $2, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2,3],xmm4[4,5],xmm7[6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm6
-; SSE-NEXT: psrld $28, %xmm6
-; SSE-NEXT: movdqa %xmm2, %xmm7
-; SSE-NEXT: psrld $30, %xmm7
-; SSE-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: psrld $29, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,3],xmm2[4,5],xmm7[6,7]
-; SSE-NEXT: paddd %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: psrad $3, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7]
-; SSE-NEXT: psrad $4, %xmm2
-; SSE-NEXT: psrad $2, %xmm6
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1],xmm5[2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm4, %xmm2
-; SSE-NEXT: movdqa %xmm5, %xmm3
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: psrld $28, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: psrld $29, %xmm6
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm5[1]
+; SSE2-NEXT: psrld $30, %xmm0
+; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm6[0,3]
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: psrad $4, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: psrad $3, %xmm6
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm5[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: psrad $2, %xmm5
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm6[0,3]
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: psrld $28, %xmm5
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: psrld $29, %xmm6
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm5[1]
+; SSE2-NEXT: psrld $30, %xmm1
+; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm6[0,3]
+; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: psrad $4, %xmm5
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: psrad $3, %xmm6
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm5[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: psrad $2, %xmm5
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm6[0,3]
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3]
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: psrld $28, %xmm5
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: psrld $29, %xmm6
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm5[1]
+; SSE2-NEXT: psrld $30, %xmm4
+; SSE2-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm6[0,3]
+; SSE2-NEXT: paddd %xmm2, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: psrad $4, %xmm5
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: psrad $3, %xmm6
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm5[1]
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: psrad $2, %xmm5
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm6[0,3]
+; SSE2-NEXT: movss {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3]
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: psrad $31, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: psrld $28, %xmm2
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: psrld $29, %xmm6
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm2[1]
+; SSE2-NEXT: psrld $30, %xmm5
+; SSE2-NEXT: pslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,xmm5[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm6[0,3]
+; SSE2-NEXT: paddd %xmm3, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: psrad $4, %xmm2
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: psrad $3, %xmm6
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: psrad $2, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm6[0,3]
+; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3]
+; SSE2-NEXT: movaps %xmm4, %xmm2
+; SSE2-NEXT: movaps %xmm5, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm1, %xmm4
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm6
+; SSE41-NEXT: psrad $31, %xmm6
+; SSE41-NEXT: movdqa %xmm6, %xmm0
+; SSE41-NEXT: psrld $28, %xmm0
+; SSE41-NEXT: movdqa %xmm6, %xmm7
+; SSE41-NEXT: psrld $30, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: psrld $29, %xmm6
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
+; SSE41-NEXT: paddd %xmm1, %xmm6
+; SSE41-NEXT: movdqa %xmm6, %xmm7
+; SSE41-NEXT: movdqa %xmm6, %xmm0
+; SSE41-NEXT: psrad $3, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm6[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: psrad $4, %xmm6
+; SSE41-NEXT: psrad $2, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3],xmm0[4,5],xmm7[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm4, %xmm6
+; SSE41-NEXT: psrad $31, %xmm6
+; SSE41-NEXT: movdqa %xmm6, %xmm1
+; SSE41-NEXT: psrld $28, %xmm1
+; SSE41-NEXT: movdqa %xmm6, %xmm7
+; SSE41-NEXT: psrld $30, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psrld $29, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
+; SSE41-NEXT: paddd %xmm4, %xmm6
+; SSE41-NEXT: movdqa %xmm6, %xmm7
+; SSE41-NEXT: movdqa %xmm6, %xmm1
+; SSE41-NEXT: psrad $3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm6[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psrad $4, %xmm6
+; SSE41-NEXT: psrad $2, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm7[2,3],xmm1[4,5],xmm7[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm2, %xmm6
+; SSE41-NEXT: psrad $31, %xmm6
+; SSE41-NEXT: movdqa %xmm6, %xmm4
+; SSE41-NEXT: psrld $28, %xmm4
+; SSE41-NEXT: movdqa %xmm6, %xmm7
+; SSE41-NEXT: psrld $30, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT: psrld $29, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
+; SSE41-NEXT: paddd %xmm2, %xmm6
+; SSE41-NEXT: movdqa %xmm6, %xmm7
+; SSE41-NEXT: movdqa %xmm6, %xmm4
+; SSE41-NEXT: psrad $3, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT: psrad $4, %xmm6
+; SSE41-NEXT: psrad $2, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2,3],xmm4[4,5],xmm7[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm3, %xmm2
+; SSE41-NEXT: psrad $31, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm6
+; SSE41-NEXT: psrld $28, %xmm6
+; SSE41-NEXT: movdqa %xmm2, %xmm7
+; SSE41-NEXT: psrld $30, %xmm7
+; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: psrld $29, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,3],xmm2[4,5],xmm7[6,7]
+; SSE41-NEXT: paddd %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm6
+; SSE41-NEXT: movdqa %xmm2, %xmm5
+; SSE41-NEXT: psrad $3, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: psrad $4, %xmm2
+; SSE41-NEXT: psrad $2, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1],xmm5[2,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm4, %xmm2
+; SSE41-NEXT: movdqa %xmm5, %xmm3
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v16i32:
; AVX1: # %bb.0:
@@ -1305,23 +1780,42 @@ define <16 x i32> @combine_vec_sdiv_by_p
}
define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $31, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT: psrlq $62, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: paddq %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: psrlq $2, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
-; SSE-NEXT: pxor %xmm2, %xmm1
-; SSE-NEXT: psubq %xmm2, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: psrlq $62, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT: paddq %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: psrlq $2, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT: movapd {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
+; SSE2-NEXT: xorpd %xmm2, %xmm1
+; SSE2-NEXT: psubq %xmm2, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrad $31, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: psrlq $62, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: paddq %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: psrlq $2, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
+; SSE41-NEXT: pxor %xmm2, %xmm1
+; SSE41-NEXT: psubq %xmm2, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; AVX1: # %bb.0:
@@ -1396,38 +1890,72 @@ define <2 x i64> @combine_vec_sdiv_by_po
}
define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrad $31, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psrlq $60, %xmm3
-; SSE-NEXT: psrlq $61, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: paddq %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrlq $4, %xmm2
-; SSE-NEXT: psrlq $3, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1152921504606846976,576460752303423488]
-; SSE-NEXT: pxor %xmm2, %xmm1
-; SSE-NEXT: psubq %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE-NEXT: psrlq $62, %xmm2
-; SSE-NEXT: pxor %xmm3, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: paddq %xmm0, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm2
-; SSE-NEXT: psrlq $2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952]
-; SSE-NEXT: pxor %xmm3, %xmm2
-; SSE-NEXT: psubq %xmm3, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrlq $61, %xmm3
+; SSE2-NEXT: psrlq $60, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
+; SSE2-NEXT: paddq %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrlq $3, %xmm2
+; SSE2-NEXT: psrlq $4, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT: movapd {{.*#+}} xmm2 = [1152921504606846976,576460752303423488]
+; SSE2-NEXT: xorpd %xmm2, %xmm1
+; SSE2-NEXT: psubq %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: psrlq $62, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; SSE2-NEXT: paddq %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: psrlq $2, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
+; SSE2-NEXT: movapd {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952]
+; SSE2-NEXT: xorpd %xmm3, %xmm2
+; SSE2-NEXT: psubq %xmm3, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: movapd %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrad $31, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psrlq $60, %xmm3
+; SSE41-NEXT: psrlq $61, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT: paddq %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrlq $4, %xmm2
+; SSE41-NEXT: psrlq $3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1152921504606846976,576460752303423488]
+; SSE41-NEXT: pxor %xmm2, %xmm1
+; SSE41-NEXT: psubq %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrad $31, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: psrlq $62, %xmm2
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: paddq %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm2
+; SSE41-NEXT: psrlq $2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952]
+; SSE41-NEXT: pxor %xmm3, %xmm2
+; SSE41-NEXT: psubq %xmm3, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v4i64:
; AVX1: # %bb.0:
@@ -1513,65 +2041,128 @@ define <4 x i64> @combine_vec_sdiv_by_po
}
define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psrad $31, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: psrlq $60, %xmm5
-; SSE-NEXT: psrlq $61, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4,5,6,7]
-; SSE-NEXT: paddq %xmm3, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psrlq $4, %xmm3
-; SSE-NEXT: psrlq $3, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [1152921504606846976,576460752303423488]
-; SSE-NEXT: pxor %xmm5, %xmm1
-; SSE-NEXT: psubq %xmm5, %xmm1
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE-NEXT: movdqa %xmm3, %xmm6
-; SSE-NEXT: psrlq $60, %xmm6
-; SSE-NEXT: psrlq $61, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: paddq %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrlq $4, %xmm4
-; SSE-NEXT: psrlq $3, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
-; SSE-NEXT: pxor %xmm5, %xmm3
-; SSE-NEXT: psubq %xmm5, %xmm3
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE-NEXT: psrlq $62, %xmm4
-; SSE-NEXT: pxor %xmm5, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; SSE-NEXT: paddq %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm6
-; SSE-NEXT: psrlq $2, %xmm6
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [9223372036854775808,2305843009213693952]
-; SSE-NEXT: pxor %xmm4, %xmm6
-; SSE-NEXT: psubq %xmm4, %xmm6
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: movdqa %xmm2, %xmm6
-; SSE-NEXT: psrad $31, %xmm6
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE-NEXT: psrlq $62, %xmm6
-; SSE-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
-; SSE-NEXT: paddq %xmm2, %xmm6
-; SSE-NEXT: movdqa %xmm6, %xmm5
-; SSE-NEXT: psrlq $2, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
-; SSE-NEXT: pxor %xmm4, %xmm5
-; SSE-NEXT: psubq %xmm4, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: psrlq $61, %xmm5
+; SSE2-NEXT: psrlq $60, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm5[0],xmm1[1]
+; SSE2-NEXT: paddq %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrlq $3, %xmm3
+; SSE2-NEXT: psrlq $4, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
+; SSE2-NEXT: movapd {{.*#+}} xmm5 = [1152921504606846976,576460752303423488]
+; SSE2-NEXT: xorpd %xmm5, %xmm1
+; SSE2-NEXT: psubq %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: psrad $31, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: psrlq $61, %xmm6
+; SSE2-NEXT: psrlq $60, %xmm3
+; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm6[0],xmm3[1]
+; SSE2-NEXT: paddq %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: psrlq $3, %xmm4
+; SSE2-NEXT: psrlq $4, %xmm3
+; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
+; SSE2-NEXT: xorpd %xmm5, %xmm3
+; SSE2-NEXT: psubq %xmm5, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: psrlq $62, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm5 = xmm5[1],xmm4[1]
+; SSE2-NEXT: paddq %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: psrlq $2, %xmm4
+; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm5[0],xmm4[1]
+; SSE2-NEXT: movapd {{.*#+}} xmm7 = [9223372036854775808,2305843009213693952]
+; SSE2-NEXT: xorpd %xmm7, %xmm4
+; SSE2-NEXT: psubq %xmm7, %xmm4
+; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: psrlq $62, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm0[1]
+; SSE2-NEXT: paddq %xmm2, %xmm6
+; SSE2-NEXT: movdqa %xmm6, %xmm5
+; SSE2-NEXT: psrlq $2, %xmm5
+; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm6[0],xmm5[1]
+; SSE2-NEXT: xorpd %xmm7, %xmm5
+; SSE2-NEXT: psubq %xmm7, %xmm5
+; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm2[0],xmm5[1]
+; SSE2-NEXT: movapd %xmm4, %xmm0
+; SSE2-NEXT: movapd %xmm5, %xmm2
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psrad $31, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: movdqa %xmm1, %xmm5
+; SSE41-NEXT: psrlq $60, %xmm5
+; SSE41-NEXT: psrlq $61, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: paddq %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psrlq $4, %xmm3
+; SSE41-NEXT: psrlq $3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [1152921504606846976,576460752303423488]
+; SSE41-NEXT: pxor %xmm5, %xmm1
+; SSE41-NEXT: psubq %xmm5, %xmm1
+; SSE41-NEXT: movdqa %xmm4, %xmm3
+; SSE41-NEXT: psrad $31, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE41-NEXT: movdqa %xmm3, %xmm6
+; SSE41-NEXT: psrlq $60, %xmm6
+; SSE41-NEXT: psrlq $61, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: paddq %xmm4, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: psrlq $4, %xmm4
+; SSE41-NEXT: psrlq $3, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT: pxor %xmm5, %xmm3
+; SSE41-NEXT: psubq %xmm5, %xmm3
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: psrad $31, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE41-NEXT: psrlq $62, %xmm4
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; SSE41-NEXT: paddq %xmm0, %xmm4
+; SSE41-NEXT: movdqa %xmm4, %xmm6
+; SSE41-NEXT: psrlq $2, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [9223372036854775808,2305843009213693952]
+; SSE41-NEXT: pxor %xmm4, %xmm6
+; SSE41-NEXT: psubq %xmm4, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm2, %xmm6
+; SSE41-NEXT: psrad $31, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT: psrlq $62, %xmm6
+; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; SSE41-NEXT: paddq %xmm2, %xmm6
+; SSE41-NEXT: movdqa %xmm6, %xmm5
+; SSE41-NEXT: psrlq $2, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pxor %xmm4, %xmm5
+; SSE41-NEXT: psubq %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i64:
; AVX1: # %bb.0:
@@ -1705,33 +2296,63 @@ define <8 x i64> @combine_vec_sdiv_by_po
}
define <4 x i32> @combine_vec_sdiv_by_pow2b_PosAndNeg(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: psrld $28, %xmm1
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psrld $30, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psrld $29, %xmm2
-; SSE-NEXT: pxor %xmm4, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; SSE-NEXT: paddd %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: psrad $3, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psrad $4, %xmm2
-; SSE-NEXT: psrad $2, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
-; SSE-NEXT: psubd %xmm1, %xmm4
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $28, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrld $29, %xmm3
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; SSE2-NEXT: psrld $30, %xmm0
+; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[0,3]
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $4, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrad $3, %xmm3
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $2, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[0,3]
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: psubd %xmm0, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrad $31, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: psrld $28, %xmm1
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psrld $30, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psrld $29, %xmm2
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; SSE41-NEXT: paddd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: psrad $3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psrad $4, %xmm2
+; SSE41-NEXT: psrad $2, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; SSE41-NEXT: psubd %xmm1, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
; AVX1: # %bb.0:
@@ -1808,15 +2429,25 @@ define <4 x i32> @combine_vec_sdiv_by_po
; PR37119
define <16 x i8> @non_splat_minus_one_divisor_0(<16 x i8> %A) {
-; SSE-LABEL: non_splat_minus_one_divisor_0:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: psubb %xmm0, %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
-; SSE-NEXT: pblendvb %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: non_splat_minus_one_divisor_0:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: psubb %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: non_splat_minus_one_divisor_0:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: psubb %xmm0, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: non_splat_minus_one_divisor_0:
; AVX1: # %bb.0:
@@ -1862,42 +2493,78 @@ define <16 x i8> @non_splat_minus_one_di
}
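
non_splat_minus_one_divisor_0 above divides by a vector of 1s and -1s, which folds to a negate (psubb from zero) plus a select between x and -x. SSE4.1 can use pblendvb with the constant mask directly; plain SSE2 has no variable byte blend, so it builds the same select from pand/pandn/por. A scalar sketch of that idiom (values illustrative):

    #include <stdint.h>

    /* The constant-mask byte select standing in for pblendvb on SSE2:
     * r = (m & a) | (~m & b), i.e. the pand/pandn/por triple above. */
    uint8_t select_byte(uint8_t m, uint8_t a, uint8_t b) {
        return (uint8_t)((m & a) | (~m & b));
    }
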
define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
-; SSE-LABEL: non_splat_minus_one_divisor_1:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pxor %xmm3, %xmm3
-; SSE-NEXT: pcmpgtb %xmm0, %xmm3
-; SSE-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: psllw $1, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3,4,5],xmm0[6],xmm4[7]
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; SSE-NEXT: pmullw {{.*}}(%rip), %xmm3
-; SSE-NEXT: psrlw $8, %xmm3
-; SSE-NEXT: packuswb %xmm3, %xmm0
-; SSE-NEXT: paddb %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE-NEXT: psraw $8, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psllw $7, %xmm4
-; SSE-NEXT: psllw $8, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4,5],xmm4[6],xmm3[7]
-; SSE-NEXT: psrlw $8, %xmm3
-; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: psraw $8, %xmm0
-; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: packuswb %xmm0, %xmm3
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
-; SSE-NEXT: pblendvb %xmm0, %xmm3, %xmm1
-; SSE-NEXT: psubb %xmm1, %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,255,0,255,255,255,0,255,255,0,0,0,0,255,0,255]
-; SSE-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: non_splat_minus_one_divisor_1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm3
+; SSE2-NEXT: psrlw $8, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: paddb %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm3
+; SSE2-NEXT: psrlw $8, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm1, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,0,255,255,0,0,0,0,255,0,255]
+; SSE2-NEXT: psubb %xmm3, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm3, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: non_splat_minus_one_divisor_1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pcmpgtb %xmm0, %xmm3
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: psllw $1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3,4,5],xmm0[6],xmm4[7]
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm3
+; SSE41-NEXT: psrlw $8, %xmm3
+; SSE41-NEXT: packuswb %xmm3, %xmm0
+; SSE41-NEXT: paddb %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE41-NEXT: psraw $8, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: psllw $7, %xmm4
+; SSE41-NEXT: psllw $8, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4,5],xmm4[6],xmm3[7]
+; SSE41-NEXT: psrlw $8, %xmm3
+; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE41-NEXT: psraw $8, %xmm0
+; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: packuswb %xmm0, %xmm3
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT: psubb %xmm1, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,0,255,255,255,0,255,255,0,0,0,0,255,0,255]
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: non_splat_minus_one_divisor_1:
; AVX1: # %bb.0:
@@ -2011,19 +2678,34 @@ define <16 x i8> @non_splat_minus_one_di
}
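
The non_splat_minus_one_divisor_1 lowerings above also show how 8-bit shifts are synthesized: x86 has no vector byte shifts, so bytes are widened into 16-bit lanes (punpcklbw/punpckhbw), shifted or multiplied there, and narrowed back with packuswb. A scalar model of an 8-bit arithmetic shift built that way (a sketch of the intent, not the exact instruction choice, which varies between the SSE2 and SSE41 blocks):

    #include <stdint.h>

    int8_t ashr_i8_via_i16(int8_t v, unsigned amt) {  /* 0 <= amt <= 7 */
        int16_t w = (int16_t)((int16_t)v << 8); /* punpck*bw: byte into the high half */
        w >>= amt;                              /* psraw (or pmullw + shift)          */
        return (int8_t)((uint16_t)w >> 8);      /* psrlw $8; packuswb                 */
    }
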
define <4 x i32> @non_splat_minus_one_divisor_2(<4 x i32> %A) {
-; SSE-LABEL: non_splat_minus_one_divisor_2:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $31, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: paddd %xmm0, %xmm1
-; SSE-NEXT: psrad $1, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psubd %xmm1, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3,4,5],xmm2[6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: non_splat_minus_one_divisor_2:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $31, %xmm2
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: psrad $1, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm2[1,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: non_splat_minus_one_divisor_2:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $31, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: psrad $1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psubd %xmm1, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3,4,5],xmm2[6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: non_splat_minus_one_divisor_2:
; AVX1: # %bb.0:
@@ -2085,17 +2767,29 @@ define <8 x i16> @combine_vec_sdiv_nonun
}
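
The combine_vec_sdiv_nonuniform* functions below check division by non-power-of-two constants, lowered to a high multiply (pmulhw) plus shifts and a sign correction; nonuniform3/4 additionally paddw/psubw the dividend when the magic multiplier does not fit in a signed i16. A scalar sketch for one lane, using an illustrative divisor of 10 (the tests use different constants per lane):

    #include <stdint.h>

    /* "Magic number" signed division by 10 on i16. */
    int16_t sdiv_by_10(int16_t x) {
        int16_t hi = (int16_t)(((int32_t)x * 26215) >> 16); /* pmulhw          */
        int16_t q  = (int16_t)(hi >> 2);                    /* psraw $2        */
        return (int16_t)(q + ((uint16_t)hi >> 15));         /* psrlw $15; paddw */
    }
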
define <8 x i16> @combine_vec_sdiv_nonuniform2(<8 x i16> %x) {
-; SSE-LABEL: combine_vec_sdiv_nonuniform2:
-; SSE: # %bb.0:
-; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psraw $1, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psraw $2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psrlw $15, %xmm0
-; SSE-NEXT: paddw %xmm2, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_nonuniform2:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pmulhw {{.*}}(%rip), %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psraw $2, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psraw $1, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
+; SSE2-NEXT: psrlw $15, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_nonuniform2:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psraw $1, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psraw $2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psrlw $15, %xmm0
+; SSE41-NEXT: paddw %xmm2, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_nonuniform2:
; AVX1: # %bb.0:
@@ -2147,20 +2841,35 @@ define <8 x i16> @combine_vec_sdiv_nonun
}
define <8 x i16> @combine_vec_sdiv_nonuniform3(<8 x i16> %x) {
-; SSE-LABEL: combine_vec_sdiv_nonuniform3:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [45591,45591,45591,45591,32833,32833,32833,32833]
-; SSE-NEXT: pmulhw %xmm0, %xmm1
-; SSE-NEXT: paddw %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psraw $8, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psraw $4, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: psrlw $15, %xmm1
-; SSE-NEXT: paddw %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_nonuniform3:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [45591,45591,45591,45591,32833,32833,32833,32833]
+; SSE2-NEXT: pmulhw %xmm0, %xmm1
+; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psraw $4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: psrlw $15, %xmm1
+; SSE2-NEXT: paddw %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_nonuniform3:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [45591,45591,45591,45591,32833,32833,32833,32833]
+; SSE41-NEXT: pmulhw %xmm0, %xmm1
+; SSE41-NEXT: paddw %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psraw $8, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psraw $4, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: psrlw $15, %xmm1
+; SSE41-NEXT: paddw %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_nonuniform3:
; AVX1: # %bb.0:
@@ -2217,20 +2926,35 @@ define <8 x i16> @combine_vec_sdiv_nonun
}
define <8 x i16> @combine_vec_sdiv_nonuniform4(<8 x i16> %x) {
-; SSE-LABEL: combine_vec_sdiv_nonuniform4:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [19945,19945,19945,19945,32639,32639,32639,32639]
-; SSE-NEXT: pmulhw %xmm0, %xmm1
-; SSE-NEXT: psubw %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psraw $8, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psraw $4, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: psrlw $15, %xmm1
-; SSE-NEXT: paddw %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_nonuniform4:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [19945,19945,19945,19945,32639,32639,32639,32639]
+; SSE2-NEXT: pmulhw %xmm0, %xmm1
+; SSE2-NEXT: psubw %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psraw $4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: psrlw $15, %xmm1
+; SSE2-NEXT: paddw %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_nonuniform4:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [19945,19945,19945,19945,32639,32639,32639,32639]
+; SSE41-NEXT: pmulhw %xmm0, %xmm1
+; SSE41-NEXT: psubw %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psraw $8, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psraw $4, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: psrlw $15, %xmm1
+; SSE41-NEXT: paddw %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_nonuniform4:
; AVX1: # %bb.0:
@@ -2287,27 +3011,62 @@ define <8 x i16> @combine_vec_sdiv_nonun
}
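
nonuniform5 and nonuniform6 below need a different arithmetic shift per lane. With no variable per-lane i16 shift available, the lowering applies psraw $8/$4/$2/$1 and selects lanes after each step (pblendw on SSE4.1, pand/pandn/por on SSE2), i.e. it walks the binary digits of each lane's shift amount. A scalar model of the cascade, inferred from the masks above:

    #include <stdint.h>

    int16_t var_ashr_i16(int16_t v, unsigned amt) {  /* 0 <= amt <= 15 */
        if (amt & 8) v >>= 8;   /* psraw $8, lanes picked by the first mask */
        if (amt & 4) v >>= 4;   /* psraw $4 */
        if (amt & 2) v >>= 2;   /* psraw $2 */
        if (amt & 1) v >>= 1;   /* psraw $1 */
        return v;
    }
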
define <8 x i16> @combine_vec_sdiv_nonuniform5(<8 x i16> %x) {
-; SSE-LABEL: combine_vec_sdiv_nonuniform5:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,0,0,1,1]
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0
-; SSE-NEXT: paddw %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psraw $8, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3,4,5,6],xmm1[7]
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psraw $4, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2],xmm1[3,4,5],xmm2[6],xmm1[7]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: psraw $2, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3,4,5],xmm1[6],xmm2[7]
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psraw $1, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2,3,4],xmm2[5,6],xmm1[7]
-; SSE-NEXT: psrlw $15, %xmm0
-; SSE-NEXT: paddw %xmm2, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_nonuniform5:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,0,0,1,1]
+; SSE2-NEXT: pmullw %xmm0, %xmm1
+; SSE2-NEXT: pmulhw {{.*}}(%rip), %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,0]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psraw $8, %xmm3
+; SSE2-NEXT: pandn %xmm3, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,0,65535,65535,65535,0,65535]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: psraw $4, %xmm1
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,0,65535]
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: psraw $2, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,0,0,65535]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: psraw $1, %xmm1
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: psrlw $15, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_nonuniform5:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,0,0,1,1]
+; SSE41-NEXT: pmullw %xmm0, %xmm1
+; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm0
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psraw $8, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3,4,5,6],xmm1[7]
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psraw $4, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2],xmm1[3,4,5],xmm2[6],xmm1[7]
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: psraw $2, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3,4,5],xmm1[6],xmm2[7]
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psraw $1, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2,3,4],xmm2[5,6],xmm1[7]
+; SSE41-NEXT: psrlw $15, %xmm0
+; SSE41-NEXT: paddw %xmm2, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_nonuniform5:
; AVX1: # %bb.0:
@@ -2376,30 +3135,65 @@ define <8 x i16> @combine_vec_sdiv_nonun
}
define <8 x i16> @combine_vec_sdiv_nonuniform6(<8 x i16> %x) {
-; SSE-LABEL: combine_vec_sdiv_nonuniform6:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,1,1,1,0]
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0
-; SSE-NEXT: paddw %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psraw $8, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4,5],xmm1[6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psraw $4, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm1[1,2,3,4],xmm2[5],xmm1[6],xmm2[7]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psraw $2, %xmm3
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3,4],xmm3[5],xmm2[6,7]
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: psraw $1, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5],xmm3[6],xmm1[7]
-; SSE-NEXT: psrlw $15, %xmm0
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
-; SSE-NEXT: paddw %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_nonuniform6:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,1,1,1,0]
+; SSE2-NEXT: pmullw %xmm0, %xmm1
+; SSE2-NEXT: pmulhw {{.*}}(%rip), %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psraw $8, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm0, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,0,65535,0]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: psraw $4, %xmm1
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,0,65535,65535]
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: psraw $2, %xmm2
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,0,65535,0]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: psraw $1, %xmm1
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: psrlw $15, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_nonuniform6:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,1,1,1,0]
+; SSE41-NEXT: pmullw %xmm0, %xmm1
+; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm0
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psraw $8, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4,5],xmm1[6,7]
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psraw $4, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm1[1,2,3,4],xmm2[5],xmm1[6],xmm2[7]
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psraw $2, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3,4],xmm3[5],xmm2[6,7]
+; SSE41-NEXT: movdqa %xmm3, %xmm1
+; SSE41-NEXT: psraw $1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5],xmm3[6],xmm1[7]
+; SSE41-NEXT: psrlw $15, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
+; SSE41-NEXT: paddw %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_nonuniform6:
; AVX1: # %bb.0:
@@ -2478,12 +3272,19 @@ define <8 x i16> @combine_vec_sdiv_nonun
}
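
nonuniform7 below appears to divide by a mix of -1s and 1s: division by -1 is negation, so the whole operation folds to a psubw from zero plus a blend that keeps the untouched lanes (movsd on SSE2, pblendw on SSE4.1). Scalar model, one lane:

    #include <stdint.h>

    int16_t sdiv_by_pm1(int16_t x, int divisor_is_neg1) {
        /* 0 - x wraps at INT16_MIN exactly as psubw does */
        return divisor_is_neg1 ? (int16_t)(0 - x) : x;
    }
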
define <8 x i16> @combine_vec_sdiv_nonuniform7(<8 x i16> %x) {
-; SSE-LABEL: combine_vec_sdiv_nonuniform7:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: psubw %xmm0, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_sdiv_nonuniform7:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: psubw %xmm0, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_sdiv_nonuniform7:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: psubw %xmm0, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_nonuniform7:
; AVX1: # %bb.0: