[llvm] r263917 - [X86][SSE] Add vector integer division by constant tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Mar 20 14:46:59 PDT 2016
Author: rksimon
Date: Sun Mar 20 16:46:58 2016
New Revision: 263917
URL: http://llvm.org/viewvc/llvm-project?rev=263917&view=rev
Log:
[X86][SSE] Add vector integer division by constant tests
Expanded the tests and split them into sdiv/srem and udiv/urem cases for 128-bit and 256-bit vectors.
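For reference, every function in these files exercises the same constant-division lowering: rather than emitting a real divide, the backend multiplies each lane by a fixed-point "magic" reciprocal and fixes the result up with shifts and adds. The scalar sketch below shows what the i64 sdiv-by-7 lanes reduce to; it is illustrative only, is not part of the committed tests, and the function names are hypothetical:

; Scalar sketch of the per-lane expansion the checks verify.
; Magic constant 5270498306774157605 = 0x4924924924924925 = ceil(2^65 / 7).
define i64 @sdiv7_scalar(i64 %x) {
  %wx    = sext i64 %x to i128
  %prod  = mul i128 %wx, 5270498306774157605  ; signed multiply by the magic constant (imulq)
  %hi128 = lshr i128 %prod, 64
  %hi    = trunc i128 %hi128 to i64           ; high half of the product (left in %rdx by imulq)
  %sign  = lshr i64 %hi, 63                   ; shrq $63 - sign bit, used to round toward zero
  %q0    = ashr i64 %hi, 1                    ; sarq %rdx
  %q     = add i64 %q0, %sign                 ; addq %rax, %rdx
  ret i64 %q
}

; The srem-by-7 tests reuse the same quotient and recover the remainder as
; x - 7*q (the leaq/subq or pmull/psub sequences in the checked output):
define i64 @srem7_scalar(i64 %x) {
  %q = call i64 @sdiv7_scalar(i64 %x)
  %m = mul i64 %q, 7
  %r = sub i64 %x, %m
  ret i64 %r
}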
Added:
llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll
llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll
llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll
llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll
Modified:
llvm/trunk/test/CodeGen/X86/vector-idiv.ll
Added: llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll?rev=263917&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-128.ll Sun Mar 20 16:46:58 2016
@@ -0,0 +1,1586 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+
+;
+; sdiv by 7
+;
+
+define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
+; SSE2-LABEL: test_div7_2i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
+; SSE2-NEXT: imulq %rcx
+; SSE2-NEXT: movq %rdx, %rax
+; SSE2-NEXT: shrq $63, %rax
+; SSE2-NEXT: sarq %rdx
+; SSE2-NEXT: addq %rax, %rdx
+; SSE2-NEXT: movd %rdx, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rax
+; SSE2-NEXT: imulq %rcx
+; SSE2-NEXT: movq %rdx, %rax
+; SSE2-NEXT: shrq $63, %rax
+; SSE2-NEXT: sarq %rdx
+; SSE2-NEXT: addq %rax, %rdx
+; SSE2-NEXT: movd %rdx, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_div7_2i64:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrq $1, %xmm0, %rax
+; SSE41-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
+; SSE41-NEXT: imulq %rcx
+; SSE41-NEXT: movq %rdx, %rax
+; SSE41-NEXT: shrq $63, %rax
+; SSE41-NEXT: sarq %rdx
+; SSE41-NEXT: addq %rax, %rdx
+; SSE41-NEXT: movd %rdx, %xmm1
+; SSE41-NEXT: movd %xmm0, %rax
+; SSE41-NEXT: imulq %rcx
+; SSE41-NEXT: movq %rdx, %rax
+; SSE41-NEXT: shrq $63, %rax
+; SSE41-NEXT: sarq %rdx
+; SSE41-NEXT: addq %rax, %rdx
+; SSE41-NEXT: movd %rdx, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_div7_2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rax
+; AVX-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
+; AVX-NEXT: imulq %rcx
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: vmovq %rdx, %xmm1
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: imulq %rcx
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: vmovq %rdx, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+ %res = sdiv <2 x i64> %a, <i64 7, i64 7>
+ ret <2 x i64> %res
+}
+
+define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
+; SSE2-LABEL: test_div7_4i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $31, %xmm0
+; SSE2-NEXT: psrad $2, %xmm1
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_div7_4i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm2, %xmm3
+; SSE41-NEXT: pmuldq %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: psrad $2, %xmm1
+; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: test_div7_4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrld $31, %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_div7_4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX2-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrld $31, %xmm0, %xmm1
+; AVX2-NEXT: vpsrad $2, %xmm0, %xmm0
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %res = sdiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %res
+}
+
+define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
+; SSE-LABEL: test_div7_8i16:
+; SSE: # BB#0:
+; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrlw $15, %xmm1
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_div7_8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $15, %xmm0, %xmm1
+; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %res = sdiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %res
+}
+
+define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
+; SSE2-LABEL: test_div7_16i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: imull $-109, %eax, %ecx
+; SSE2-NEXT: shrl $8, %ecx
+; SSE2-NEXT: addb %al, %cl
+; SSE2-NEXT: movb %cl, %al
+; SSE2-NEXT: shrb $7, %al
+; SSE2-NEXT: sarb $2, %cl
+; SSE2-NEXT: addb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r14d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r9d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r11d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r8d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: imull $-109, %esi, %edi
+; SSE2-NEXT: shrl $8, %edi
+; SSE2-NEXT: addb %sil, %dil
+; SSE2-NEXT: movb %dil, %bl
+; SSE2-NEXT: shrb $7, %bl
+; SSE2-NEXT: sarb $2, %dil
+; SSE2-NEXT: addb %bl, %dil
+; SSE2-NEXT: movzbl %dil, %esi
+; SSE2-NEXT: movd %esi, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: imull $-109, %eax, %esi
+; SSE2-NEXT: shrl $8, %esi
+; SSE2-NEXT: addb %al, %sil
+; SSE2-NEXT: movb %sil, %al
+; SSE2-NEXT: shrb $7, %al
+; SSE2-NEXT: sarb $2, %sil
+; SSE2-NEXT: addb %al, %sil
+; SSE2-NEXT: movzbl %sil, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebp
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: imull $-109, %edi, %ebx
+; SSE2-NEXT: shrl $8, %ebx
+; SSE2-NEXT: addb %dil, %bl
+; SSE2-NEXT: movb %bl, %al
+; SSE2-NEXT: shrb $7, %al
+; SSE2-NEXT: sarb $2, %bl
+; SSE2-NEXT: addb %al, %bl
+; SSE2-NEXT: movzbl %bl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: imull $-109, %edx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: imull $-109, %esi, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %sil, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: imull $-109, %ecx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: imull $-109, %eax, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: addb %al, %dl
+; SSE2-NEXT: movb %dl, %al
+; SSE2-NEXT: shrb $7, %al
+; SSE2-NEXT: sarb $2, %dl
+; SSE2-NEXT: addb %al, %dl
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: imull $-109, %r14d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r14b, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: imull $-109, %ebp, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %bpl, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: imull $-109, %r11d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r11b, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: imull $-109, %ecx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: imull $-109, %r9d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r9b, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: imull $-109, %r10d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r10b, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: imull $-109, %r8d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r8b, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: imull $-109, %eax, %ecx
+; SSE2-NEXT: shrl $8, %ecx
+; SSE2-NEXT: addb %al, %cl
+; SSE2-NEXT: movb %cl, %al
+; SSE2-NEXT: shrb $7, %al
+; SSE2-NEXT: sarb $2, %cl
+; SSE2-NEXT: addb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_div7_16i8:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pextrb $0, %xmm0, %ecx
+; SSE41-NEXT: movsbl %cl, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %edx
+; SSE41-NEXT: shrl $8, %edx
+; SSE41-NEXT: addb %dl, %cl
+; SSE41-NEXT: movb %cl, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %cl
+; SSE41-NEXT: addb %dl, %cl
+; SSE41-NEXT: movzbl %cl, %ecx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrb $1, %eax, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_div7_16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movzbl %cl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %res = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %res
+}
+
+;
+; srem by 7
+;
+
+define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
+; SSE2-LABEL: test_rem7_2i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %xmm0, %rcx
+; SSE2-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: imulq %rsi
+; SSE2-NEXT: movq %rdx, %rax
+; SSE2-NEXT: shrq $63, %rax
+; SSE2-NEXT: sarq %rdx
+; SSE2-NEXT: addq %rax, %rdx
+; SSE2-NEXT: leaq (,%rdx,8), %rax
+; SSE2-NEXT: subq %rdx, %rax
+; SSE2-NEXT: subq %rax, %rcx
+; SSE2-NEXT: movd %rcx, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rcx
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: imulq %rsi
+; SSE2-NEXT: movq %rdx, %rax
+; SSE2-NEXT: shrq $63, %rax
+; SSE2-NEXT: sarq %rdx
+; SSE2-NEXT: addq %rax, %rdx
+; SSE2-NEXT: leaq (,%rdx,8), %rax
+; SSE2-NEXT: subq %rdx, %rax
+; SSE2-NEXT: subq %rax, %rcx
+; SSE2-NEXT: movd %rcx, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_rem7_2i64:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrq $1, %xmm0, %rcx
+; SSE41-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
+; SSE41-NEXT: movq %rcx, %rax
+; SSE41-NEXT: imulq %rsi
+; SSE41-NEXT: movq %rdx, %rax
+; SSE41-NEXT: shrq $63, %rax
+; SSE41-NEXT: sarq %rdx
+; SSE41-NEXT: addq %rax, %rdx
+; SSE41-NEXT: leaq (,%rdx,8), %rax
+; SSE41-NEXT: subq %rdx, %rax
+; SSE41-NEXT: subq %rax, %rcx
+; SSE41-NEXT: movd %rcx, %xmm1
+; SSE41-NEXT: movd %xmm0, %rcx
+; SSE41-NEXT: movq %rcx, %rax
+; SSE41-NEXT: imulq %rsi
+; SSE41-NEXT: movq %rdx, %rax
+; SSE41-NEXT: shrq $63, %rax
+; SSE41-NEXT: sarq %rdx
+; SSE41-NEXT: addq %rax, %rdx
+; SSE41-NEXT: leaq (,%rdx,8), %rax
+; SSE41-NEXT: subq %rdx, %rax
+; SSE41-NEXT: subq %rax, %rcx
+; SSE41-NEXT: movd %rcx, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_rem7_2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: imulq %rsi
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: leaq (,%rdx,8), %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: subq %rax, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm1
+; AVX-NEXT: vmovq %xmm0, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: imulq %rsi
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: leaq (,%rdx,8), %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: subq %rax, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+ %res = srem <2 x i64> %a, <i64 7, i64 7>
+ ret <2 x i64> %res
+}
+
+define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
+; SSE2-LABEL: test_rem7_4i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrld $31, %xmm2
+; SSE2-NEXT: psrad $2, %xmm1
+; SSE2-NEXT: paddd %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [7,7,7,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: psubd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_rem7_4i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuldq %xmm2, %xmm3
+; SSE41-NEXT: pmuldq %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrld $31, %xmm2
+; SSE41-NEXT: psrad $2, %xmm1
+; SSE41-NEXT: paddd %xmm2, %xmm1
+; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1
+; SSE41-NEXT: psubd %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: test_rem7_4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $31, %xmm1, %xmm2
+; AVX1-NEXT: vpsrad $2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_rem7_4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX2-NEXT: vpaddd %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vpsrld $31, %xmm1, %xmm2
+; AVX2-NEXT: vpsrad $2, %xmm1, %xmm1
+; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %res = srem <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %res
+}
+
+define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
+; SSE-LABEL: test_rem7_8i16:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [18725,18725,18725,18725,18725,18725,18725,18725]
+; SSE-NEXT: pmulhw %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrlw $15, %xmm2
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: paddw %xmm2, %xmm1
+; SSE-NEXT: pmullw {{.*}}(%rip), %xmm1
+; SSE-NEXT: psubw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_rem7_8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vpsrlw $15, %xmm1, %xmm2
+; AVX-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %res = srem <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %res
+}
+
+define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
+; SSE2-LABEL: test_rem7_16i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: imull $-109, %ecx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: movb $7, %r11b
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r15d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebx
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r9d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r14d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r8d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebp
+; SSE2-NEXT: imull $-109, %ebp, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %bpl, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %bpl
+; SSE2-NEXT: movzbl %bpl, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: imull $-109, %edi, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %dil, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %dil
+; SSE2-NEXT: movzbl %dil, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebp
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: imull $-109, %edx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: imull $-109, %ebx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %bl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %bl
+; SSE2-NEXT: movzbl %bl, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: imull $-109, %ebp, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %bpl, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %bpl
+; SSE2-NEXT: movzbl %bpl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: imull $-109, %esi, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %sil, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %sil
+; SSE2-NEXT: movzbl %sil, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: imull $-109, %edx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: movb %al, %bl
+; SSE2-NEXT: shrb $7, %bl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %bl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: imull $-109, %r15d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r15b, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r15b
+; SSE2-NEXT: movzbl %r15b, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: imull $-109, %edi, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %dil, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %dil
+; SSE2-NEXT: movzbl %dil, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: imull $-109, %r14d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r14b, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r14b
+; SSE2-NEXT: movzbl %r14b, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: imull $-109, %ecx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: imull $-109, %r9d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r9b, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r9b
+; SSE2-NEXT: movzbl %r9b, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: imull $-109, %r10d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r10b, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r10b
+; SSE2-NEXT: movzbl %r10b, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: imull $-109, %r8d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %r8b, %al
+; SSE2-NEXT: movb %al, %cl
+; SSE2-NEXT: shrb $7, %cl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r8b
+; SSE2-NEXT: movzbl %r8b, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: imull $-109, %ecx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: movb %al, %dl
+; SSE2-NEXT: shrb $7, %dl
+; SSE2-NEXT: sarb $2, %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_rem7_16i8:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %edx
+; SSE41-NEXT: imull $-109, %edx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb $7, %dil
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %edx
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %esi
+; SSE41-NEXT: imull $-109, %esi, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %sil
+; SSE41-NEXT: movzbl %sil, %eax
+; SSE41-NEXT: movd %eax, %xmm1
+; SSE41-NEXT: pinsrb $1, %edx, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %eax
+; SSE41-NEXT: shrl $8, %eax
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %dl, %al
+; SSE41-NEXT: mulb %dil
+; SSE41-NEXT: subb %al, %cl
+; SSE41-NEXT: movzbl %cl, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_rem7_16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %edx
+; AVX-NEXT: imull $-109, %edx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb $7, %dil
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %edx
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %esi
+; AVX-NEXT: imull $-109, %esi, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %sil
+; AVX-NEXT: movzbl %sil, %eax
+; AVX-NEXT: vmovd %eax, %xmm1
+; AVX-NEXT: vpinsrb $1, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %res = srem <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %res
+}
Added: llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll?rev=263917&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll Sun Mar 20 16:46:58 2016
@@ -0,0 +1,964 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+
+;
+; sdiv by 7
+;
+
+define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
+; AVX-LABEL: test_div7_4i64:
+; AVX: # BB#0:
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpextrq $1, %xmm1, %rax
+; AVX-NEXT: movabsq $5270498306774157605, %rcx # imm = 0x4924924924924925
+; AVX-NEXT: imulq %rcx
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: vmovq %rdx, %xmm2
+; AVX-NEXT: vmovq %xmm1, %rax
+; AVX-NEXT: imulq %rcx
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: vmovq %rdx, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX-NEXT: vpextrq $1, %xmm0, %rax
+; AVX-NEXT: imulq %rcx
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: vmovq %rdx, %xmm2
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: imulq %rcx
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: vmovq %rdx, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = sdiv <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
+ ret <4 x i64> %res
+}
+
+define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
+; AVX-LABEL: test_div7_8i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpmuldq %ymm2, %ymm3, %ymm2
+; AVX-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
+; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vpsrld $31, %ymm0, %ymm1
+; AVX-NEXT: vpsrad $2, %ymm0, %ymm0
+; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = sdiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %res
+}
+
+define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
+; AVX-LABEL: test_div7_16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: vpsrlw $15, %ymm0, %ymm1
+; AVX-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = sdiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <16 x i16> %res
+}
+
+define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
+; AVX-LABEL: test_div7_32i8:
+; AVX: # BB#0:
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpextrb $1, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movzbl %cl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm2
+; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movzbl %cl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm2
+; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = sdiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+ ret <32 x i8> %res
+}
+
+;
+; srem by 7
+;
+
+define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
+; AVX-LABEL: test_rem7_4i64:
+; AVX: # BB#0:
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX-NEXT: movabsq $5270498306774157605, %rsi # imm = 0x4924924924924925
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: imulq %rsi
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: leaq (,%rdx,8), %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: subq %rax, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm2
+; AVX-NEXT: vmovq %xmm1, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: imulq %rsi
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: leaq (,%rdx,8), %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: subq %rax, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: imulq %rsi
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: leaq (,%rdx,8), %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: subq %rax, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm2
+; AVX-NEXT: vmovq %xmm0, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: imulq %rsi
+; AVX-NEXT: movq %rdx, %rax
+; AVX-NEXT: shrq $63, %rax
+; AVX-NEXT: sarq %rdx
+; AVX-NEXT: addq %rax, %rdx
+; AVX-NEXT: leaq (,%rdx,8), %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: subq %rax, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = srem <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
+ ret <4 x i64> %res
+}
+
+define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
+; AVX-LABEL: test_rem7_8i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpmuldq %ymm2, %ymm3, %ymm2
+; AVX-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
+; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm1
+; AVX-NEXT: vpsrld $31, %ymm1, %ymm2
+; AVX-NEXT: vpsrad $2, %ymm1, %ymm1
+; AVX-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = srem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %res
+}
+
+define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
+; AVX-LABEL: test_rem7_16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm1
+; AVX-NEXT: vpsrlw $15, %ymm1, %ymm2
+; AVX-NEXT: vpsraw $1, %ymm1, %ymm1
+; AVX-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = srem <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <16 x i16> %res
+}
+
+define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
+; AVX-LABEL: test_rem7_32i8:
+; AVX: # BB#0:
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpextrb $1, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %edx
+; AVX-NEXT: imull $-109, %edx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb $7, %dil
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %edx
+; AVX-NEXT: vpextrb $0, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %esi
+; AVX-NEXT: imull $-109, %esi, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %sil
+; AVX-NEXT: movzbl %sil, %eax
+; AVX-NEXT: vmovd %eax, %xmm2
+; AVX-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm1, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %esi
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %edx
+; AVX-NEXT: imull $-109, %edx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vmovd %eax, %xmm2
+; AVX-NEXT: vpinsrb $1, %esi, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %ecx
+; AVX-NEXT: imull $-109, %ecx, %eax
+; AVX-NEXT: shrl $8, %eax
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %dl, %al
+; AVX-NEXT: mulb %dil
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = srem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+ ret <32 x i8> %res
+}
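
Aside (again, not part of the patch): the 4 x i64 signed tests use the usual multiply-high sequence with the magic constant 0x4924924924924925 seen in the movabsq above. A sketch of one lane in C, assuming __int128 support and arithmetic right shift of negative values:

    #include <stdint.h>

    static inline int64_t sdiv7_i64(int64_t x) {
        const int64_t magic = 0x4924924924924925LL;           /* movabsq                */
        int64_t hi = (int64_t)(((__int128)x * magic) >> 64);  /* imulq : high 64 bits   */
        return (hi >> 1) + (int64_t)((uint64_t)hi >> 63);     /* sarq ; shrq $63 ; addq */
    }

test_rem7_4i64 then rebuilds 7*q with the leaq (,%rdx,8)/subq pair and subtracts it from the original lane.
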
Added: llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll?rev=263917&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-128.ll Sun Mar 20 16:46:58 2016
@@ -0,0 +1,1470 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+
+;
+; udiv by 7
+;
+
+define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind {
+; SSE2-LABEL: test_div7_2i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %xmm0, %rcx
+; SSE2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: mulq %rsi
+; SSE2-NEXT: subq %rdx, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: addq %rdx, %rcx
+; SSE2-NEXT: shrq $2, %rcx
+; SSE2-NEXT: movd %rcx, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rcx
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: mulq %rsi
+; SSE2-NEXT: subq %rdx, %rcx
+; SSE2-NEXT: shrq %rcx
+; SSE2-NEXT: addq %rdx, %rcx
+; SSE2-NEXT: shrq $2, %rcx
+; SSE2-NEXT: movd %rcx, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_div7_2i64:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrq $1, %xmm0, %rcx
+; SSE41-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
+; SSE41-NEXT: movq %rcx, %rax
+; SSE41-NEXT: mulq %rsi
+; SSE41-NEXT: subq %rdx, %rcx
+; SSE41-NEXT: shrq %rcx
+; SSE41-NEXT: addq %rdx, %rcx
+; SSE41-NEXT: shrq $2, %rcx
+; SSE41-NEXT: movd %rcx, %xmm1
+; SSE41-NEXT: movd %xmm0, %rcx
+; SSE41-NEXT: movq %rcx, %rax
+; SSE41-NEXT: mulq %rsi
+; SSE41-NEXT: subq %rdx, %rcx
+; SSE41-NEXT: shrq %rcx
+; SSE41-NEXT: addq %rdx, %rcx
+; SSE41-NEXT: shrq $2, %rcx
+; SSE41-NEXT: movd %rcx, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_div7_2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: shrq %rcx
+; AVX-NEXT: addq %rdx, %rcx
+; AVX-NEXT: shrq $2, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm1
+; AVX-NEXT: vmovq %xmm0, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: shrq %rcx
+; AVX-NEXT: addq %rdx, %rcx
+; AVX-NEXT: shrq $2, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+ %res = udiv <2 x i64> %a, <i64 7, i64 7>
+ ret <2 x i64> %res
+}
+
+define <4 x i32> @test_div7_4i32(<4 x i32> %a) nounwind {
+; SSE2-LABEL: test_div7_4i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pmuludq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: psubd %xmm2, %xmm0
+; SSE2-NEXT: psrld $1, %xmm0
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: psrld $2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_div7_4i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: pmuludq %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: psubd %xmm1, %xmm0
+; SSE41-NEXT: psrld $1, %xmm0
+; SSE41-NEXT: paddd %xmm1, %xmm0
+; SSE41-NEXT: psrld $2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: test_div7_4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $2, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_div7_4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %res = udiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %res
+}
+
+define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
+; SSE-LABEL: test_div7_8i16:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
+; SSE-NEXT: pmulhuw %xmm0, %xmm1
+; SSE-NEXT: psubw %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm1, %xmm0
+; SSE-NEXT: psrlw $2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_div7_8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %res = udiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %res
+}
+
+define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
+; SSE2-LABEL: test_div7_16i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: imull $37, %eax, %ecx
+; SSE2-NEXT: shrl $8, %ecx
+; SSE2-NEXT: subb %cl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: imull $37, %esi, %edi
+; SSE2-NEXT: shrl $8, %edi
+; SSE2-NEXT: subb %dil, %sil
+; SSE2-NEXT: shrb %sil
+; SSE2-NEXT: addb %dil, %sil
+; SSE2-NEXT: shrb $2, %sil
+; SSE2-NEXT: movzbl %sil, %esi
+; SSE2-NEXT: movd %esi, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: imull $37, %eax, %esi
+; SSE2-NEXT: shrl $8, %esi
+; SSE2-NEXT: subb %sil, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %sil, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: imull $37, %edi, %ebp
+; SSE2-NEXT: shrl $8, %ebp
+; SSE2-NEXT: subb %bpl, %dil
+; SSE2-NEXT: shrb %dil
+; SSE2-NEXT: addb %bpl, %dil
+; SSE2-NEXT: shrb $2, %dil
+; SSE2-NEXT: movzbl %dil, %edi
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: imull $37, %edx, %edi
+; SSE2-NEXT: shrl $8, %edi
+; SSE2-NEXT: subb %dil, %dl
+; SSE2-NEXT: shrb %dl
+; SSE2-NEXT: addb %dil, %dl
+; SSE2-NEXT: shrb $2, %dl
+; SSE2-NEXT: movzbl %dl, %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: imull $37, %esi, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: subb %dl, %sil
+; SSE2-NEXT: shrb %sil
+; SSE2-NEXT: addb %dl, %sil
+; SSE2-NEXT: shrb $2, %sil
+; SSE2-NEXT: movzbl %sil, %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: imull $37, %ecx, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: subb %dl, %cl
+; SSE2-NEXT: shrb %cl
+; SSE2-NEXT: addb %dl, %cl
+; SSE2-NEXT: shrb $2, %cl
+; SSE2-NEXT: movzbl %cl, %ecx
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: imull $37, %edx, %esi
+; SSE2-NEXT: shrl $8, %esi
+; SSE2-NEXT: subb %sil, %dl
+; SSE2-NEXT: shrb %dl
+; SSE2-NEXT: addb %sil, %dl
+; SSE2-NEXT: shrb $2, %dl
+; SSE2-NEXT: movzbl %dl, %edx
+; SSE2-NEXT: movd %edx, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: imull $37, %ebx, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: subb %dl, %bl
+; SSE2-NEXT: shrb %bl
+; SSE2-NEXT: addb %dl, %bl
+; SSE2-NEXT: shrb $2, %bl
+; SSE2-NEXT: movzbl %bl, %edx
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: imull $37, %eax, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: imull $37, %r11d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: subb %al, %r11b
+; SSE2-NEXT: shrb %r11b
+; SSE2-NEXT: addb %al, %r11b
+; SSE2-NEXT: shrb $2, %r11b
+; SSE2-NEXT: movzbl %r11b, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: imull $37, %ecx, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: shrb %cl
+; SSE2-NEXT: addb %al, %cl
+; SSE2-NEXT: shrb $2, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: imull $37, %r9d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: subb %al, %r9b
+; SSE2-NEXT: shrb %r9b
+; SSE2-NEXT: addb %al, %r9b
+; SSE2-NEXT: shrb $2, %r9b
+; SSE2-NEXT: movzbl %r9b, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: imull $37, %r10d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: subb %al, %r10b
+; SSE2-NEXT: shrb %r10b
+; SSE2-NEXT: addb %al, %r10b
+; SSE2-NEXT: shrb $2, %r10b
+; SSE2-NEXT: movzbl %r10b, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: imull $37, %r8d, %eax
+; SSE2-NEXT: shrl $8, %eax
+; SSE2-NEXT: subb %al, %r8b
+; SSE2-NEXT: shrb %r8b
+; SSE2-NEXT: addb %al, %r8b
+; SSE2-NEXT: shrb $2, %r8b
+; SSE2-NEXT: movzbl %r8b, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: imull $37, %eax, %ecx
+; SSE2-NEXT: shrl $8, %ecx
+; SSE2-NEXT: subb %cl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_div7_16i8:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pextrb $0, %xmm0, %ecx
+; SSE41-NEXT: imull $37, %ecx, %edx
+; SSE41-NEXT: shrl $8, %edx
+; SSE41-NEXT: subb %dl, %cl
+; SSE41-NEXT: shrb %cl
+; SSE41-NEXT: addb %dl, %cl
+; SSE41-NEXT: shrb $2, %cl
+; SSE41-NEXT: movzbl %cl, %ecx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrb $1, %eax, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %eax
+; SSE41-NEXT: imull $37, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_div7_16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX-NEXT: imull $37, %ecx, %edx
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: subb %dl, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: shrb $2, %cl
+; AVX-NEXT: movzbl %cl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %res = udiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %res
+}
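+
+; Aside (editorial sketch, not generated by the compiler): the unsigned lanes
+; above replace the signed fixup with a subtract/shift/add step. In C, with the
+; constants copied from the generated code and assuming __int128 support:
+;
+;   static inline uint8_t udiv7_u8(uint8_t x) {
+;       unsigned t = (x * 37u) >> 8;                  /* imull $37 ; shrl $8          */
+;       return (uint8_t)((((x - t) >> 1) + t) >> 2);  /* subb ; shrb ; addb ; shrb $2 */
+;   }
+;
+;   static inline uint64_t udiv7_u64(uint64_t x) {
+;       const uint64_t magic = 0x2492492492492493ULL;                    /* movabsq */
+;       uint64_t hi = (uint64_t)(((unsigned __int128)x * magic) >> 64);  /* mulq    */
+;       return (((x - hi) >> 1) + hi) >> 2;           /* subq ; shrq ; addq ; shrq $2 */
+;   }
+;
+; The urem tests below multiply the quotient back by 7 and subtract, as in the
+; signed versions.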
+
+;
+; urem by 7
+;
+
+define <2 x i64> @test_rem7_2i64(<2 x i64> %a) nounwind {
+; SSE2-LABEL: test_rem7_2i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movd %xmm0, %rcx
+; SSE2-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: mulq %rsi
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: subq %rdx, %rax
+; SSE2-NEXT: shrq %rax
+; SSE2-NEXT: addq %rdx, %rax
+; SSE2-NEXT: shrq $2, %rax
+; SSE2-NEXT: leaq (,%rax,8), %rdx
+; SSE2-NEXT: subq %rax, %rdx
+; SSE2-NEXT: subq %rdx, %rcx
+; SSE2-NEXT: movd %rcx, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, %rcx
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: mulq %rsi
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: subq %rdx, %rax
+; SSE2-NEXT: shrq %rax
+; SSE2-NEXT: addq %rdx, %rax
+; SSE2-NEXT: shrq $2, %rax
+; SSE2-NEXT: leaq (,%rax,8), %rdx
+; SSE2-NEXT: subq %rax, %rdx
+; SSE2-NEXT: subq %rdx, %rcx
+; SSE2-NEXT: movd %rcx, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_rem7_2i64:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrq $1, %xmm0, %rcx
+; SSE41-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
+; SSE41-NEXT: movq %rcx, %rax
+; SSE41-NEXT: mulq %rsi
+; SSE41-NEXT: movq %rcx, %rax
+; SSE41-NEXT: subq %rdx, %rax
+; SSE41-NEXT: shrq %rax
+; SSE41-NEXT: addq %rdx, %rax
+; SSE41-NEXT: shrq $2, %rax
+; SSE41-NEXT: leaq (,%rax,8), %rdx
+; SSE41-NEXT: subq %rax, %rdx
+; SSE41-NEXT: subq %rdx, %rcx
+; SSE41-NEXT: movd %rcx, %xmm1
+; SSE41-NEXT: movd %xmm0, %rcx
+; SSE41-NEXT: movq %rcx, %rax
+; SSE41-NEXT: mulq %rsi
+; SSE41-NEXT: movq %rcx, %rax
+; SSE41-NEXT: subq %rdx, %rax
+; SSE41-NEXT: shrq %rax
+; SSE41-NEXT: addq %rdx, %rax
+; SSE41-NEXT: shrq $2, %rax
+; SSE41-NEXT: leaq (,%rax,8), %rdx
+; SSE41-NEXT: subq %rax, %rdx
+; SSE41-NEXT: subq %rdx, %rcx
+; SSE41-NEXT: movd %rcx, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_rem7_2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: shrq %rax
+; AVX-NEXT: addq %rdx, %rax
+; AVX-NEXT: shrq $2, %rax
+; AVX-NEXT: leaq (,%rax,8), %rdx
+; AVX-NEXT: subq %rax, %rdx
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm1
+; AVX-NEXT: vmovq %xmm0, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: shrq %rax
+; AVX-NEXT: addq %rdx, %rax
+; AVX-NEXT: shrq $2, %rax
+; AVX-NEXT: leaq (,%rax,8), %rdx
+; AVX-NEXT: subq %rax, %rdx
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+ %res = urem <2 x i64> %a, <i64 7, i64 7>
+ ret <2 x i64> %res
+}
+
+define <4 x i32> @test_rem7_4i32(<4 x i32> %a) nounwind {
+; SSE2-LABEL: test_rem7_4i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pmuludq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: paddd %xmm2, %xmm1
+; SSE2-NEXT: psrld $2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [7,7,7,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: psubd %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_rem7_4i32:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: pmuludq %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psubd %xmm1, %xmm2
+; SSE41-NEXT: psrld $1, %xmm2
+; SSE41-NEXT: paddd %xmm1, %xmm2
+; SSE41-NEXT: psrld $2, %xmm2
+; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm2
+; SSE41-NEXT: psubd %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: test_rem7_4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrld $2, %xmm1, %xmm1
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_rem7_4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX2-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX2-NEXT: vpsrld $2, %xmm1, %xmm1
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+ %res = urem <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %res
+}
+
+define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
+; SSE-LABEL: test_rem7_8i16:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
+; SSE-NEXT: pmulhuw %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psubw %xmm1, %xmm2
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: paddw %xmm1, %xmm2
+; SSE-NEXT: psrlw $2, %xmm2
+; SSE-NEXT: pmullw {{.*}}(%rip), %xmm2
+; SSE-NEXT: psubw %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_rem7_8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX-NEXT: vpaddw %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vpsrlw $2, %xmm1, %xmm1
+; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %res = urem <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %res
+}
+
+define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
+; SSE2-LABEL: test_rem7_16i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: imull $37, %ecx, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %cl, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: movb $7, %r11b
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r15d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
+; SSE2-NEXT: imull $37, %ebp, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %bpl, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %bpl
+; SSE2-NEXT: movzbl %bpl, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: imull $37, %edi, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %dil, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %dil
+; SSE2-NEXT: movzbl %dil, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: imull $37, %edx, %esi
+; SSE2-NEXT: shrl $8, %esi
+; SSE2-NEXT: movb %dl, %al
+; SSE2-NEXT: subb %sil, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %sil, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: imull $37, %ebx, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %bl, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %bl
+; SSE2-NEXT: movzbl %bl, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: imull $37, %ebp, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %bpl, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %bpl
+; SSE2-NEXT: movzbl %bpl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-NEXT: imull $37, %ecx, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %cl, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: imull $37, %edx, %esi
+; SSE2-NEXT: shrl $8, %esi
+; SSE2-NEXT: movb %dl, %al
+; SSE2-NEXT: subb %sil, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %sil, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: imull $37, %r15d, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %r15b, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r15b
+; SSE2-NEXT: movzbl %r15b, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: imull $37, %edi, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %dil, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %dil
+; SSE2-NEXT: movzbl %dil, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: imull $37, %r14d, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %r14b, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r14b
+; SSE2-NEXT: movzbl %r14b, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: imull $37, %ecx, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %cl, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: imull $37, %r9d, %ecx
+; SSE2-NEXT: shrl $8, %ecx
+; SSE2-NEXT: movb %r9b, %al
+; SSE2-NEXT: subb %cl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r9b
+; SSE2-NEXT: movzbl %r9b, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: imull $37, %r10d, %ecx
+; SSE2-NEXT: shrl $8, %ecx
+; SSE2-NEXT: movb %r10b, %al
+; SSE2-NEXT: subb %cl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r10b
+; SSE2-NEXT: movzbl %r10b, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: imull $37, %r8d, %ecx
+; SSE2-NEXT: shrl $8, %ecx
+; SSE2-NEXT: movb %r8b, %al
+; SSE2-NEXT: subb %cl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %cl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %r8b
+; SSE2-NEXT: movzbl %r8b, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: imull $37, %ecx, %edx
+; SSE2-NEXT: shrl $8, %edx
+; SSE2-NEXT: movb %cl, %al
+; SSE2-NEXT: subb %dl, %al
+; SSE2-NEXT: shrb %al
+; SSE2-NEXT: addb %dl, %al
+; SSE2-NEXT: shrb $2, %al
+; SSE2-NEXT: mulb %r11b
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_rem7_16i8:
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %cl, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: movb $7, %cl
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %edx
+; SSE41-NEXT: pextrb $0, %xmm0, %esi
+; SSE41-NEXT: imull $37, %esi, %edi
+; SSE41-NEXT: shrl $8, %edi
+; SSE41-NEXT: movb %sil, %al
+; SSE41-NEXT: subb %dil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %dil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %sil
+; SSE41-NEXT: movzbl %sil, %eax
+; SSE41-NEXT: movd %eax, %xmm1
+; SSE41-NEXT: pinsrb $1, %edx, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %edx
+; SSE41-NEXT: imull $37, %edx, %esi
+; SSE41-NEXT: shrl $8, %esi
+; SSE41-NEXT: movb %dl, %al
+; SSE41-NEXT: subb %sil, %al
+; SSE41-NEXT: shrb %al
+; SSE41-NEXT: addb %sil, %al
+; SSE41-NEXT: shrb $2, %al
+; SSE41-NEXT: mulb %cl
+; SSE41-NEXT: subb %al, %dl
+; SSE41-NEXT: movzbl %dl, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: test_rem7_16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movb $7, %cl
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %edx
+; AVX-NEXT: vpextrb $0, %xmm0, %esi
+; AVX-NEXT: imull $37, %esi, %edi
+; AVX-NEXT: shrl $8, %edi
+; AVX-NEXT: movb %sil, %al
+; AVX-NEXT: subb %dil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %dil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %sil
+; AVX-NEXT: movzbl %sil, %eax
+; AVX-NEXT: vmovd %eax, %xmm1
+; AVX-NEXT: vpinsrb $1, %edx, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $3, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $4, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $5, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $6, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $7, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $8, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $9, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $10, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $11, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $12, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $13, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $14, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX-NEXT: vpextrb $15, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %res = urem <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %res
+}
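For reference, the i64 lanes in the urem tests above are scalarized into the usual round-up reciprocal sequence for unsigned division by 7: multiply by 0x2492492492492493 (which is ceil(2^64 / 7)), keep the high 64 bits, apply the sub/shr/add/shr fix-up, and recover the remainder as n - 7*q with the leaq/subq pair. A minimal C sketch of that per-lane recipe (helper names are illustrative; assumes a compiler that provides __uint128_t):

  #include <stdint.h>

  /* High 64 bits of n * ceil(2^64 / 7), i.e. the mulq in the CHECK lines,
     followed by the sub/shr/add/shr fix-up. */
  static uint64_t udiv7(uint64_t n) {
    uint64_t hi = (uint64_t)(((__uint128_t)n * 0x2492492492492493ULL) >> 64);
    uint64_t t = (n - hi) >> 1;     /* subq, shrq    */
    return (t + hi) >> 2;           /* addq, shrq $2 */
  }

  static uint64_t urem7(uint64_t n) {
    uint64_t q = udiv7(n);
    return n - (8 * q - q);         /* leaq (,%rax,8), subq %rax, subq */
  }
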
Added: llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll?rev=263917&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll Sun Mar 20 16:46:58 2016
@@ -0,0 +1,878 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+
+;
+; udiv by 7
+;
+
+define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
+; AVX-LABEL: test_div7_4i64:
+; AVX: # BB#0:
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: shrq %rcx
+; AVX-NEXT: addq %rdx, %rcx
+; AVX-NEXT: shrq $2, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm2
+; AVX-NEXT: vmovq %xmm1, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: shrq %rcx
+; AVX-NEXT: addq %rdx, %rcx
+; AVX-NEXT: shrq $2, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: shrq %rcx
+; AVX-NEXT: addq %rdx, %rcx
+; AVX-NEXT: shrq $2, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm2
+; AVX-NEXT: vmovq %xmm0, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: shrq %rcx
+; AVX-NEXT: addq %rdx, %rcx
+; AVX-NEXT: shrq $2, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = udiv <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
+ ret <4 x i64> %res
+}
+
+define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
+; AVX-LABEL: test_div7_8i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
+; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm1
+; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsrld $2, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = udiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %res
+}
+
+define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
+; AVX-LABEL: test_div7_16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
+; AVX-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsrlw $2, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = udiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <16 x i16> %res
+}
+
+define <32 x i8> @test_div7_32i8(<32 x i8> %a) nounwind {
+; AVX-LABEL: test_div7_32i8:
+; AVX: # BB#0:
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpextrb $1, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX-NEXT: imull $37, %ecx, %edx
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: subb %dl, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: shrb $2, %cl
+; AVX-NEXT: movzbl %cl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm2
+; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm1, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX-NEXT: imull $37, %ecx, %edx
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: subb %dl, %cl
+; AVX-NEXT: shrb %cl
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: shrb $2, %cl
+; AVX-NEXT: movzbl %cl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm2
+; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm0, %eax
+; AVX-NEXT: imull $37, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = udiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
+ ret <32 x i8> %res
+}
+
+;
+; urem by 7
+;
+
+define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
+; AVX-LABEL: test_rem7_4i64:
+; AVX: # BB#0:
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX-NEXT: movabsq $2635249153387078803, %rsi # imm = 0x2492492492492493
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: shrq %rax
+; AVX-NEXT: addq %rdx, %rax
+; AVX-NEXT: shrq $2, %rax
+; AVX-NEXT: leaq (,%rax,8), %rdx
+; AVX-NEXT: subq %rax, %rdx
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm2
+; AVX-NEXT: vmovq %xmm1, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: shrq %rax
+; AVX-NEXT: addq %rdx, %rax
+; AVX-NEXT: shrq $2, %rax
+; AVX-NEXT: leaq (,%rax,8), %rdx
+; AVX-NEXT: subq %rax, %rdx
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: shrq %rax
+; AVX-NEXT: addq %rdx, %rax
+; AVX-NEXT: shrq $2, %rax
+; AVX-NEXT: leaq (,%rax,8), %rdx
+; AVX-NEXT: subq %rax, %rdx
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm2
+; AVX-NEXT: vmovq %xmm0, %rcx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: mulq %rsi
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: subq %rdx, %rax
+; AVX-NEXT: shrq %rax
+; AVX-NEXT: addq %rdx, %rax
+; AVX-NEXT: shrq $2, %rax
+; AVX-NEXT: leaq (,%rax,8), %rdx
+; AVX-NEXT: subq %rax, %rdx
+; AVX-NEXT: subq %rdx, %rcx
+; AVX-NEXT: vmovq %rcx, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = urem <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
+ ret <4 x i64> %res
+}
+
+define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
+; AVX-LABEL: test_rem7_8i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
+; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm1
+; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm2
+; AVX-NEXT: vpsrld $1, %ymm2, %ymm2
+; AVX-NEXT: vpaddd %ymm1, %ymm2, %ymm1
+; AVX-NEXT: vpsrld $2, %ymm1, %ymm1
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = urem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i32> %res
+}
+
+define <16 x i16> @test_rem7_16i16(<16 x i16> %a) nounwind {
+; AVX-LABEL: test_rem7_16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
+; AVX-NEXT: vpsubw %ymm1, %ymm0, %ymm2
+; AVX-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX-NEXT: vpaddw %ymm1, %ymm2, %ymm1
+; AVX-NEXT: vpsrlw $2, %ymm1, %ymm1
+; AVX-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = urem <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <16 x i16> %res
+}
+
+define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
+; AVX-LABEL: test_rem7_32i8:
+; AVX: # BB#0:
+; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT: vpextrb $1, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %cl, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: movb $7, %cl
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %edx
+; AVX-NEXT: vpextrb $0, %xmm1, %esi
+; AVX-NEXT: imull $37, %esi, %edi
+; AVX-NEXT: shrl $8, %edi
+; AVX-NEXT: movb %sil, %al
+; AVX-NEXT: subb %dil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %dil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %sil
+; AVX-NEXT: movzbl %sil, %eax
+; AVX-NEXT: vmovd %eax, %xmm2
+; AVX-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm1, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX-NEXT: vpextrb $1, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %edx
+; AVX-NEXT: vpextrb $0, %xmm0, %esi
+; AVX-NEXT: imull $37, %esi, %edi
+; AVX-NEXT: shrl $8, %edi
+; AVX-NEXT: movb %sil, %al
+; AVX-NEXT: subb %dil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %dil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %sil
+; AVX-NEXT: movzbl %sil, %eax
+; AVX-NEXT: vmovd %eax, %xmm2
+; AVX-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $2, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $3, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $4, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $5, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $6, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $7, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $8, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $9, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $10, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $11, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $12, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $13, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $14, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrb $15, %xmm0, %edx
+; AVX-NEXT: imull $37, %edx, %esi
+; AVX-NEXT: shrl $8, %esi
+; AVX-NEXT: movb %dl, %al
+; AVX-NEXT: subb %sil, %al
+; AVX-NEXT: shrb %al
+; AVX-NEXT: addb %sil, %al
+; AVX-NEXT: shrb $2, %al
+; AVX-NEXT: mulb %cl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: movzbl %dl, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %res = urem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
+ ret <32 x i8> %res
+}
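The 32-bit lanes have no packed unsigned multiply-high (pmulhuw only exists for 16-bit elements), so the 4 x i32 and 8 x i32 tests build one out of pmuludq/vpmuludq: the even and odd lanes are multiplied separately and the upper halves interleaved back together, after which the same sub/shr/add/shr fix-up runs. A rough SSE2 intrinsics sketch of that step (helper names are illustrative, not part of the tests):

  #include <emmintrin.h>

  /* Unsigned 32-bit multiply-high built from pmuludq, mirroring the SSE2
     pshufd/pmuludq/punpckldq pattern in the 4 x i32 tests. */
  static __m128i mulhi_epu32(__m128i a, __m128i b) {
    __m128i p02 = _mm_mul_epu32(a, b);                        /* a0*b0, a2*b2 */
    __m128i p13 = _mm_mul_epu32(_mm_shuffle_epi32(a, 0xF5),   /* lanes 1,1,3,3 */
                                _mm_shuffle_epi32(b, 0xF5));  /* a1*b1, a3*b3 */
    __m128i h02 = _mm_shuffle_epi32(p02, 0xED);               /* high halves of p02 */
    __m128i h13 = _mm_shuffle_epi32(p13, 0xED);               /* high halves of p13 */
    return _mm_unpacklo_epi32(h02, h13);                      /* h0, h1, h2, h3 */
  }

  /* Division by 7 then uses 613566757 = ceil(2^32 / 7) and the same fix-up
     as the 64-bit lanes. */
  static __m128i udiv7_epu32(__m128i a) {
    __m128i q = mulhi_epu32(a, _mm_set1_epi32(613566757));
    __m128i t = _mm_srli_epi32(_mm_sub_epi32(a, q), 1);       /* psubd, psrld $1 */
    return _mm_srli_epi32(_mm_add_epi32(t, q), 2);            /* paddd, psrld $2 */
  }

The constants at the other widths follow the same ceil(2^N / 7) pattern: 9363 for the pmulhuw-based i16 lanes and 37 for the i8 lanes, which have no vector multiply-high at all and are therefore still extracted byte by byte through the scalar imull $37 / shrl $8 sequence seen above.
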
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv.ll?rev=263917&r1=263916&r2=263917&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv.ll Sun Mar 20 16:46:58 2016
@@ -1,1212 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -march=x86-64 -mcpu=core2 -mattr=+sse4.1 < %s | FileCheck %s --check-prefix=SSE41
-; RUN: llc -march=x86-64 -mcpu=core2 < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -march=x86-64 -mcpu=core-avx2 < %s | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-target triple = "x86_64-unknown-unknown"
-
-define <4 x i32> @test1(<4 x i32> %a) #0 {
-; SSE41-LABEL: test1:
-; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: pmuludq %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; SSE41-NEXT: psubd %xmm1, %xmm0
-; SSE41-NEXT: psrld $1, %xmm0
-; SSE41-NEXT: paddd %xmm1, %xmm0
-; SSE41-NEXT: psrld $2, %xmm0
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test1:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm1, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT: psubd %xmm2, %xmm0
-; SSE-NEXT: psrld $1, %xmm0
-; SSE-NEXT: paddd %xmm2, %xmm0
-; SSE-NEXT: psrld $2, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test1:
-; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
-; AVX-NEXT: retq
- %div = udiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
- ret <4 x i32> %div
-}
-
-define <8 x i32> @test2(<8 x i32> %a) #0 {
-; SSE41-LABEL: test2:
-; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm3, %xmm4
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: pmuludq %xmm2, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
-; SSE41-NEXT: psubd %xmm5, %xmm0
-; SSE41-NEXT: psrld $1, %xmm0
-; SSE41-NEXT: paddd %xmm5, %xmm0
-; SSE41-NEXT: psrld $2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm3, %xmm4
-; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
-; SSE41-NEXT: psubd %xmm2, %xmm1
-; SSE41-NEXT: psrld $1, %xmm1
-; SSE41-NEXT: paddd %xmm2, %xmm1
-; SSE41-NEXT: psrld $2, %xmm1
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test2:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pmuludq %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE-NEXT: psubd %xmm3, %xmm0
-; SSE-NEXT: psrld $1, %xmm0
-; SSE-NEXT: paddd %xmm3, %xmm0
-; SSE-NEXT: psrld $2, %xmm0
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm4, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: psubd %xmm2, %xmm1
-; SSE-NEXT: psrld $1, %xmm1
-; SSE-NEXT: paddd %xmm2, %xmm1
-; SSE-NEXT: psrld $2, %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test2:
-; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; AVX-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
-; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm1
-; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vpsrld $2, %ymm0, %ymm0
-; AVX-NEXT: retq
- %div = udiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
- ret <8 x i32> %div
-}
-
-define <8 x i16> @test3(<8 x i16> %a) #0 {
-; SSE41-LABEL: test3:
-; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
-; SSE41-NEXT: pmulhuw %xmm0, %xmm1
-; SSE41-NEXT: psubw %xmm1, %xmm0
-; SSE41-NEXT: psrlw $1, %xmm0
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: psrlw $2, %xmm0
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test3:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
-; SSE-NEXT: pmulhuw %xmm0, %xmm1
-; SSE-NEXT: psubw %xmm1, %xmm0
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: paddw %xmm1, %xmm0
-; SSE-NEXT: psrlw $2, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test3:
-; AVX: # BB#0:
-; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
-; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
-; AVX-NEXT: retq
- %div = udiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
- ret <8 x i16> %div
-}
-
-define <16 x i16> @test4(<16 x i16> %a) #0 {
-; SSE41-LABEL: test4:
-; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pmulhuw %xmm2, %xmm3
-; SSE41-NEXT: psubw %xmm3, %xmm0
-; SSE41-NEXT: psrlw $1, %xmm0
-; SSE41-NEXT: paddw %xmm3, %xmm0
-; SSE41-NEXT: psrlw $2, %xmm0
-; SSE41-NEXT: pmulhuw %xmm1, %xmm2
-; SSE41-NEXT: psubw %xmm2, %xmm1
-; SSE41-NEXT: psrlw $1, %xmm1
-; SSE41-NEXT: paddw %xmm2, %xmm1
-; SSE41-NEXT: psrlw $2, %xmm1
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test4:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pmulhuw %xmm2, %xmm3
-; SSE-NEXT: psubw %xmm3, %xmm0
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: paddw %xmm3, %xmm0
-; SSE-NEXT: psrlw $2, %xmm0
-; SSE-NEXT: pmulhuw %xmm1, %xmm2
-; SSE-NEXT: psubw %xmm2, %xmm1
-; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: paddw %xmm2, %xmm1
-; SSE-NEXT: psrlw $2, %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test4:
-; AVX: # BB#0:
-; AVX-NEXT: vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
-; AVX-NEXT: vpsubw %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vpsrlw $2, %ymm0, %ymm0
-; AVX-NEXT: retq
- %div = udiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7>
- ret <16 x i16> %div
-}
-
-define <8 x i16> @test5(<8 x i16> %a) #0 {
-; SSE41-LABEL: test5:
-; SSE41: # BB#0:
-; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $15, %xmm1
-; SSE41-NEXT: psraw $1, %xmm0
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test5:
-; SSE: # BB#0:
-; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrlw $15, %xmm1
-; SSE-NEXT: psraw $1, %xmm0
-; SSE-NEXT: paddw %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test5:
-; AVX: # BB#0:
-; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $15, %xmm0, %xmm1
-; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
-; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
- %div = sdiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
- ret <8 x i16> %div
-}
-
-define <16 x i16> @test6(<16 x i16> %a) #0 {
-; SSE41-LABEL: test6:
-; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725]
-; SSE41-NEXT: pmulhw %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrlw $15, %xmm3
-; SSE41-NEXT: psraw $1, %xmm0
-; SSE41-NEXT: paddw %xmm3, %xmm0
-; SSE41-NEXT: pmulhw %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlw $15, %xmm2
-; SSE41-NEXT: psraw $1, %xmm1
-; SSE41-NEXT: paddw %xmm2, %xmm1
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test6:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725]
-; SSE-NEXT: pmulhw %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrlw $15, %xmm3
-; SSE-NEXT: psraw $1, %xmm0
-; SSE-NEXT: paddw %xmm3, %xmm0
-; SSE-NEXT: pmulhw %xmm2, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrlw $15, %xmm2
-; SSE-NEXT: psraw $1, %xmm1
-; SSE-NEXT: paddw %xmm2, %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test6:
-; AVX: # BB#0:
-; AVX-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: vpsrlw $15, %ymm0, %ymm1
-; AVX-NEXT: vpsraw $1, %ymm0, %ymm0
-; AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX-NEXT: retq
- %div = sdiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7>
- ret <16 x i16> %div
-}
-
-define <16 x i8> @test7(<16 x i8> %a) #0 {
-; SSE41-LABEL: test7:
-; SSE41: # BB#0:
-; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pextrb $0, %xmm0, %ecx
-; SSE41-NEXT: movsbl %cl, %ecx
-; SSE41-NEXT: imull $-109, %ecx, %edx
-; SSE41-NEXT: shrl $8, %edx
-; SSE41-NEXT: addb %dl, %cl
-; SSE41-NEXT: movb %cl, %dl
-; SSE41-NEXT: shrb $7, %dl
-; SSE41-NEXT: sarb $2, %cl
-; SSE41-NEXT: addb %dl, %cl
-; SSE41-NEXT: movzbl %cl, %ecx
-; SSE41-NEXT: movd %ecx, %xmm1
-; SSE41-NEXT: pinsrb $1, %eax, %xmm1
-; SSE41-NEXT: pextrb $2, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $2, %eax, %xmm1
-; SSE41-NEXT: pextrb $3, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $3, %eax, %xmm1
-; SSE41-NEXT: pextrb $4, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $4, %eax, %xmm1
-; SSE41-NEXT: pextrb $5, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $5, %eax, %xmm1
-; SSE41-NEXT: pextrb $6, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $6, %eax, %xmm1
-; SSE41-NEXT: pextrb $7, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $7, %eax, %xmm1
-; SSE41-NEXT: pextrb $8, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $8, %eax, %xmm1
-; SSE41-NEXT: pextrb $9, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $9, %eax, %xmm1
-; SSE41-NEXT: pextrb $10, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $10, %eax, %xmm1
-; SSE41-NEXT: pextrb $11, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $11, %eax, %xmm1
-; SSE41-NEXT: pextrb $12, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $12, %eax, %xmm1
-; SSE41-NEXT: pextrb $13, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $13, %eax, %xmm1
-; SSE41-NEXT: pextrb $14, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $14, %eax, %xmm1
-; SSE41-NEXT: pextrb $15, %xmm0, %eax
-; SSE41-NEXT: movsbl %al, %eax
-; SSE41-NEXT: imull $-109, %eax, %ecx
-; SSE41-NEXT: shrl $8, %ecx
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movb %al, %cl
-; SSE41-NEXT: shrb $7, %cl
-; SSE41-NEXT: sarb $2, %al
-; SSE41-NEXT: addb %cl, %al
-; SSE41-NEXT: movzbl %al, %eax
-; SSE41-NEXT: pinsrb $15, %eax, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test7:
-; SSE: # BB#0:
-; SSE-NEXT: pushq %rbp
-; SSE-NEXT: pushq %r14
-; SSE-NEXT: pushq %rbx
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r14d
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %edx
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r9d
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r11d
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r8d
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi
-; SSE-NEXT: imull $-109, %esi, %edi
-; SSE-NEXT: shrl $8, %edi
-; SSE-NEXT: addb %sil, %dil
-; SSE-NEXT: movb %dil, %bl
-; SSE-NEXT: shrb $7, %bl
-; SSE-NEXT: sarb $2, %dil
-; SSE-NEXT: addb %bl, %dil
-; SSE-NEXT: movzbl %dil, %esi
-; SSE-NEXT: movd %esi, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: imull $-109, %eax, %esi
-; SSE-NEXT: shrl $8, %esi
-; SSE-NEXT: addb %al, %sil
-; SSE-NEXT: movb %sil, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %sil
-; SSE-NEXT: addb %al, %sil
-; SSE-NEXT: movzbl %sil, %eax
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebp
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %r10d
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %edi
-; SSE-NEXT: imull $-109, %edi, %ebx
-; SSE-NEXT: shrl $8, %ebx
-; SSE-NEXT: addb %dil, %bl
-; SSE-NEXT: movb %bl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %bl
-; SSE-NEXT: addb %al, %bl
-; SSE-NEXT: movzbl %bl, %eax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: imull $-109, %edx, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %dl, %al
-; SSE-NEXT: movb %al, %dl
-; SSE-NEXT: shrb $7, %dl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %dl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: imull $-109, %esi, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %sil, %al
-; SSE-NEXT: movb %al, %dl
-; SSE-NEXT: shrb $7, %dl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %dl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE-NEXT: imull $-109, %ecx, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %cl, %al
-; SSE-NEXT: movb %al, %cl
-; SSE-NEXT: shrb $7, %cl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %cl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm3
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %edx
-; SSE-NEXT: shrl $8, %edx
-; SSE-NEXT: addb %al, %dl
-; SSE-NEXT: movb %dl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %dl
-; SSE-NEXT: addb %al, %dl
-; SSE-NEXT: movzbl %dl, %eax
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT: imull $-109, %r14d, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %r14b, %al
-; SSE-NEXT: movb %al, %dl
-; SSE-NEXT: shrb $7, %dl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %dl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: imull $-109, %ebp, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %bpl, %al
-; SSE-NEXT: movb %al, %dl
-; SSE-NEXT: shrb $7, %dl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %dl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE-NEXT: imull $-109, %r11d, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %r11b, %al
-; SSE-NEXT: movb %al, %dl
-; SSE-NEXT: shrb $7, %dl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %dl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm3
-; SSE-NEXT: imull $-109, %ecx, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %cl, %al
-; SSE-NEXT: movb %al, %cl
-; SSE-NEXT: shrb $7, %cl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %cl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm2
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE-NEXT: imull $-109, %r9d, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %r9b, %al
-; SSE-NEXT: movb %al, %cl
-; SSE-NEXT: shrb $7, %cl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %cl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: imull $-109, %r10d, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %r10b, %al
-; SSE-NEXT: movb %al, %cl
-; SSE-NEXT: shrb $7, %cl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %cl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm3
-; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE-NEXT: imull $-109, %r8d, %eax
-; SSE-NEXT: shrl $8, %eax
-; SSE-NEXT: addb %r8b, %al
-; SSE-NEXT: movb %al, %cl
-; SSE-NEXT: shrb $7, %cl
-; SSE-NEXT: sarb $2, %al
-; SSE-NEXT: addb %cl, %al
-; SSE-NEXT: movzbl %al, %eax
-; SSE-NEXT: movd %eax, %xmm4
-; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
-; SSE-NEXT: imull $-109, %eax, %ecx
-; SSE-NEXT: shrl $8, %ecx
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movb %cl, %al
-; SSE-NEXT: shrb $7, %al
-; SSE-NEXT: sarb $2, %cl
-; SSE-NEXT: addb %al, %cl
-; SSE-NEXT: movzbl %cl, %eax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT: popq %rbx
-; SSE-NEXT: popq %r14
-; SSE-NEXT: popq %rbp
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test7:
-; AVX: # BB#0:
-; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: movsbl %al, %eax
-; AVX-NEXT: imull $-109, %eax, %ecx
-; AVX-NEXT: shrl $8, %ecx
-; AVX-NEXT: addb %cl, %al
-; AVX-NEXT: movb %al, %cl
-; AVX-NEXT: shrb $7, %cl
-; AVX-NEXT: sarb $2, %al
-; AVX-NEXT: addb %cl, %al
-; AVX-NEXT: movzbl %al, %eax
-; AVX-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %dl
-; AVX-NEXT: shrb $7, %dl
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movzbl %cl, %ecx
-; AVX-NEXT: vmovd %ecx, %xmm1
-; AVX-NEXT: vpextrb $2, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $3, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $4, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $5, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $6, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $7, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $8, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $9, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $10, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $11, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $12, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $13, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $14, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpextrb $15, %xmm0, %ecx
-; AVX-NEXT: movsbl %cl, %ecx
-; AVX-NEXT: imull $-109, %ecx, %edx
-; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm0
-; AVX-NEXT: shrl $8, %edx
-; AVX-NEXT: addb %dl, %cl
-; AVX-NEXT: movb %cl, %al
-; AVX-NEXT: shrb $7, %al
-; AVX-NEXT: sarb $2, %cl
-; AVX-NEXT: addb %al, %cl
-; AVX-NEXT: movzbl %cl, %eax
-; AVX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
- %div = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
- ret <16 x i8> %div
-}
-
-define <4 x i32> @test8(<4 x i32> %a) #0 {
-; SSE41-LABEL: test8:
-; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm2, %xmm3
-; SSE41-NEXT: pmuldq %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; SSE41-NEXT: paddd %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $31, %xmm0
-; SSE41-NEXT: psrad $2, %xmm1
-; SSE41-NEXT: paddd %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test8:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: pand %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pmuludq %xmm1, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; SSE-NEXT: psrad $31, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: paddd %xmm1, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm4, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE-NEXT: psubd %xmm2, %xmm1
-; SSE-NEXT: paddd %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $31, %xmm0
-; SSE-NEXT: psrad $2, %xmm1
-; SSE-NEXT: paddd %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test8:
-; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vpsrld $31, %xmm0, %xmm1
-; AVX-NEXT: vpsrad $2, %xmm0, %xmm0
-; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
- %div = sdiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
- ret <4 x i32> %div
-}
-
-define <8 x i32> @test9(<8 x i32> %a) #0 {
-; SSE41-LABEL: test9:
-; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm4, %xmm5
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pmuldq %xmm3, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
-; SSE41-NEXT: paddd %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psrld $31, %xmm0
-; SSE41-NEXT: psrad $2, %xmm2
-; SSE41-NEXT: paddd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm4, %xmm0
-; SSE41-NEXT: pmuldq %xmm1, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
-; SSE41-NEXT: paddd %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: psrld $31, %xmm0
-; SSE41-NEXT: psrad $2, %xmm3
-; SSE41-NEXT: paddd %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm1
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test9:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027]
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrad $31, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm2, %xmm5
-; SSE-NEXT: psrad $31, %xmm5
-; SSE-NEXT: pand %xmm3, %xmm5
-; SSE-NEXT: paddd %xmm0, %xmm5
-; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: pmuludq %xmm3, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm6, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
-; SSE-NEXT: psubd %xmm5, %xmm0
-; SSE-NEXT: paddd %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld $31, %xmm2
-; SSE-NEXT: psrad $2, %xmm0
-; SSE-NEXT: paddd %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm4
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: psrad $31, %xmm5
-; SSE-NEXT: pand %xmm3, %xmm5
-; SSE-NEXT: paddd %xmm4, %xmm5
-; SSE-NEXT: pmuludq %xmm1, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm6, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: psubd %xmm5, %xmm2
-; SSE-NEXT: paddd %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: psrld $31, %xmm1
-; SSE-NEXT: psrad $2, %xmm2
-; SSE-NEXT: paddd %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test9:
-; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; AVX-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpmuldq %ymm2, %ymm3, %ymm2
-; AVX-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
-; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm0
-; AVX-NEXT: vpsrld $31, %ymm0, %ymm1
-; AVX-NEXT: vpsrad $2, %ymm0, %ymm0
-; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: retq
- %div = sdiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
- ret <8 x i32> %div
-}
-
-define <8 x i32> @test10(<8 x i32> %a) #0 {
-; SSE41-LABEL: test10:
-; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm3, %xmm4
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: pmuludq %xmm2, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: psubd %xmm5, %xmm4
-; SSE41-NEXT: psrld $1, %xmm4
-; SSE41-NEXT: paddd %xmm5, %xmm4
-; SSE41-NEXT: psrld $2, %xmm4
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7]
-; SSE41-NEXT: pmulld %xmm5, %xmm4
-; SSE41-NEXT: psubd %xmm4, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm3, %xmm4
-; SSE41-NEXT: pmuludq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psubd %xmm2, %xmm3
-; SSE41-NEXT: psrld $1, %xmm3
-; SSE41-NEXT: paddd %xmm2, %xmm3
-; SSE41-NEXT: psrld $2, %xmm3
-; SSE41-NEXT: pmulld %xmm5, %xmm3
-; SSE41-NEXT: psubd %xmm3, %xmm1
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test10:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [613566757,613566757,613566757,613566757]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pmuludq %xmm3, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: psubd %xmm2, %xmm5
-; SSE-NEXT: psrld $1, %xmm5
-; SSE-NEXT: paddd %xmm2, %xmm5
-; SSE-NEXT: psrld $2, %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [7,7,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm2, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE-NEXT: pmuludq %xmm2, %xmm6
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
-; SSE-NEXT: psubd %xmm5, %xmm0
-; SSE-NEXT: pmuludq %xmm1, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm4, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psubd %xmm3, %xmm4
-; SSE-NEXT: psrld $1, %xmm4
-; SSE-NEXT: paddd %xmm3, %xmm4
-; SSE-NEXT: psrld $2, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE-NEXT: pmuludq %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE-NEXT: psubd %xmm4, %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test10:
-; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; AVX-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
-; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm1
-; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm2
-; AVX-NEXT: vpsrld $1, %ymm2, %ymm2
-; AVX-NEXT: vpaddd %ymm1, %ymm2, %ymm1
-; AVX-NEXT: vpsrld $2, %ymm1, %ymm1
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
-; AVX-NEXT: vpmulld %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: retq
- %rem = urem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
- ret <8 x i32> %rem
-}
-
-define <8 x i32> @test11(<8 x i32> %a) #0 {
-; SSE41-LABEL: test11:
-; SSE41: # BB#0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm3, %xmm4
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: pmuldq %xmm2, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
-; SSE41-NEXT: paddd %xmm0, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm4
-; SSE41-NEXT: psrld $31, %xmm4
-; SSE41-NEXT: psrad $2, %xmm5
-; SSE41-NEXT: paddd %xmm4, %xmm5
-; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7]
-; SSE41-NEXT: pmulld %xmm4, %xmm5
-; SSE41-NEXT: psubd %xmm5, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuldq %xmm3, %xmm5
-; SSE41-NEXT: pmuldq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
-; SSE41-NEXT: paddd %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrld $31, %xmm3
-; SSE41-NEXT: psrad $2, %xmm2
-; SSE41-NEXT: paddd %xmm3, %xmm2
-; SSE41-NEXT: pmulld %xmm4, %xmm2
-; SSE41-NEXT: psubd %xmm2, %xmm1
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test11:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: psrad $31, %xmm6
-; SSE-NEXT: pand %xmm2, %xmm6
-; SSE-NEXT: paddd %xmm4, %xmm6
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm5, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
-; SSE-NEXT: psubd %xmm6, %xmm7
-; SSE-NEXT: paddd %xmm0, %xmm7
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: psrld $31, %xmm4
-; SSE-NEXT: psrad $2, %xmm7
-; SSE-NEXT: paddd %xmm4, %xmm7
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm4, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
-; SSE-NEXT: pmuludq %xmm4, %xmm6
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; SSE-NEXT: psubd %xmm7, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm6
-; SSE-NEXT: psrad $31, %xmm6
-; SSE-NEXT: pand %xmm2, %xmm6
-; SSE-NEXT: paddd %xmm3, %xmm6
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm5, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: psubd %xmm6, %xmm2
-; SSE-NEXT: paddd %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psrld $31, %xmm3
-; SSE-NEXT: psrad $2, %xmm2
-; SSE-NEXT: paddd %xmm3, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm4, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE-NEXT: pmuludq %xmm4, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: psubd %xmm2, %xmm1
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test11:
-; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; AVX-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpmuldq %ymm2, %ymm3, %ymm2
-; AVX-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
-; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
-; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm1
-; AVX-NEXT: vpsrld $31, %ymm1, %ymm2
-; AVX-NEXT: vpsrad $2, %ymm1, %ymm1
-; AVX-NEXT: vpaddd %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
-; AVX-NEXT: vpmulld %ymm2, %ymm1, %ymm1
-; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: retq
- %rem = srem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
- ret <8 x i32> %rem
-}
-
-define <2 x i16> @test12() #0 {
-; SSE41-LABEL: test12:
-; SSE41: # BB#0:
-; SSE41-NEXT: xorps %xmm0, %xmm0
-; SSE41-NEXT: retq
-;
-; SSE-LABEL: test12:
+define <2 x i16> @test_urem_unary_v2i16() nounwind {
+; SSE-LABEL: test_urem_unary_v2i16:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: test12:
+; AVX-LABEL: test_urem_unary_v2i16:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -1216,7 +20,30 @@ define <2 x i16> @test12() #0 {
ret <2 x i16> %B9
}
-define <4 x i32> @PR20355(<4 x i32> %a) #0 {
+define <4 x i32> @PR20355(<4 x i32> %a) nounwind {
+; SSE2-LABEL: PR20355:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrad $31, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: paddd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; SSE2-NEXT: psubd %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: psrld $31, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
; SSE41-LABEL: PR20355:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
@@ -1231,44 +58,32 @@ define <4 x i32> @PR20355(<4 x i32> %a)
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
-; SSE-LABEL: PR20355:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrad $31, %xmm2
-; SSE-NEXT: pand %xmm0, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: pand %xmm1, %xmm3
-; SSE-NEXT: paddd %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE-NEXT: pmuludq %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
-; SSE-NEXT: psubd %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm4, %xmm0
-; SSE-NEXT: psrld $31, %xmm0
-; SSE-NEXT: paddd %xmm4, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: PR20355:
-; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; AVX-NEXT: vpsrld $31, %xmm0, %xmm1
-; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: PR20355:
+; AVX1: # BB#0: # %entry
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vpsrld $31, %xmm0, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: PR20355:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
+; AVX2-NEXT: vpsrld $31, %xmm0, %xmm1
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
entry:
%sdiv = sdiv <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
ret <4 x i32> %sdiv
}
-
-attributes #0 = { nounwind }
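
As context for the checks above: each variant encodes the standard multiply-by-magic-constant lowering of division by 7, using 0x24924925 (613566757) as the unsigned 32-bit magic and 0x92492493 (2454267027) as the signed one. A minimal scalar C sketch of the per-lane arithmetic the vector sequences compute, given here purely as an illustration (it is not part of the patch, and it assumes an arithmetic right shift for signed values) is:

#include <stdint.h>

/* udiv by 7: unsigned multiply-high by ceil(2^32/7) = 0x24924925, then the
   sub / shift-by-1 / add / shift-by-2 fixup that the psubd/psrld $1/paddd/
   psrld $2 check lines spell out. */
static uint32_t udiv7(uint32_t n) {
  uint32_t hi = (uint32_t)(((uint64_t)n * 0x24924925u) >> 32);
  return (((n - hi) >> 1) + hi) >> 2;
}

/* sdiv by 7: signed multiply-high by 0x92492493 (the signed value
   -1840700269), add n back, then arithmetic shift by 2 plus the sign bit,
   matching the paddd / psrad $2 / psrld $31 / paddd sequence. */
static int32_t sdiv7(int32_t n) {
  int32_t hi = (int32_t)(((int64_t)n * -1840700269LL) >> 32);
  int32_t q  = hi + n;
  return (q >> 2) + (int32_t)((uint32_t)q >> 31);
}

The 16-bit checks use the analogous pmulhuw/pmulhw forms with the 9363 and 18725 magics.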