[llvm] r218954 - [x86] Regenerate precise FileCheck lines for the last batch of test
Chandler Carruth
chandlerc at gmail.com
Thu Oct 2 18:57:38 PDT 2014
Author: chandlerc
Date: Thu Oct 2 20:57:38 2014
New Revision: 218954
URL: http://llvm.org/viewvc/llvm-project?rev=218954&view=rev
Log:
[x86] Regenerate precise FileCheck lines for the last batch of test
cases.
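
These exact CHECK lines are machine-generated rather than hand-written. As a
rough sketch (assuming the utils/update_llc_test_checks.py helper developed
alongside this work is present in your checkout and that llc is on your PATH;
the invocation below is illustrative, not taken from this commit), the
assertions for the files touched here could be regenerated with something
like:

  # Hypothetical invocation: the script reads each test's RUN lines, runs the
  # corresponding llc command, and rewrites every function body with the
  # prefix-specific CHECK/CHECK-NEXT lines seen in the diff below.
  cd llvm
  python utils/update_llc_test_checks.py \
      test/CodeGen/X86/vector-idiv.ll \
      test/CodeGen/X86/vselect.ll \
      test/CodeGen/X86/widen_shuffle-1.ll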
Modified:
llvm/trunk/test/CodeGen/X86/vector-idiv.ll
llvm/trunk/test/CodeGen/X86/vselect.ll
llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv.ll?rev=218954&r1=218953&r2=218954&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv.ll Thu Oct 2 20:57:38 2014
@@ -1,260 +1,1250 @@
-; RUN: llc -march=x86-64 -mcpu=core2 -mattr=+sse4.1 < %s | FileCheck %s -check-prefix=SSE41
-; RUN: llc -march=x86-64 -mcpu=core2 < %s | FileCheck %s -check-prefix=SSE
-; RUN: llc -march=x86-64 -mcpu=core-avx2 < %s | FileCheck %s -check-prefix=AVX
+; RUN: llc -march=x86-64 -mcpu=core2 -mattr=+sse4.1 < %s | FileCheck %s --check-prefix=SSE41
+; RUN: llc -march=x86-64 -mcpu=core2 < %s | FileCheck %s --check-prefix=SSE
+; RUN: llc -march=x86-64 -mcpu=core-avx2 < %s | FileCheck %s --check-prefix=AVX
define <4 x i32> @test1(<4 x i32> %a) {
- %div = udiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
- ret <4 x i32> %div
-
; SSE41-LABEL: test1:
-; SSE41: pmuludq
-; SSE41: pshufd $49
-; SSE41: pmuludq
-; SSE41: shufps $-35
-; SSE41: psubd
-; SSE41: psrld $1
-; SSE41: padd
-; SSE41: psrld $2
-
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pmuludq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,0,3,0]
+; SSE41-NEXT: pmuludq %xmm1, %xmm3
+; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,1,3]
+; SSE41-NEXT: psubd %xmm1, %xmm0
+; SSE41-NEXT: psrld $1, %xmm0
+; SSE41-NEXT: paddd %xmm1, %xmm0
+; SSE41-NEXT: psrld $2, %xmm0
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test1:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,1,3]
+; SSE-NEXT: psubd %xmm1, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: psrld $2, %xmm0
+; SSE-NEXT: retq
+;
; AVX-LABEL: test1:
-; AVX: vpmuludq
-; AVX: vpshufd $49
-; AVX: vpmuludq
-; AVX: vshufps $-35
-; AVX: vpsubd
-; AVX: vpsrld $1
-; AVX: vpadd
-; AVX: vpsrld $2
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,0,3,0]
+; AVX-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %div = udiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %div
}
define <8 x i32> @test2(<8 x i32> %a) {
+; SSE41-LABEL: test2:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,0,3,0]
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,0,3,0]
+; SSE41-NEXT: pmuludq %xmm4, %xmm5
+; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE41-NEXT: psubd %xmm3, %xmm0
+; SSE41-NEXT: psrld $1, %xmm0
+; SSE41-NEXT: paddd %xmm3, %xmm0
+; SSE41-NEXT: psrld $2, %xmm0
+; SSE41-NEXT: pmuludq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,0,3,0]
+; SSE41-NEXT: pmuludq %xmm4, %xmm3
+; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE41-NEXT: psubd %xmm2, %xmm1
+; SSE41-NEXT: psrld $1, %xmm1
+; SSE41-NEXT: paddd %xmm2, %xmm1
+; SSE41-NEXT: psrld $2, %xmm1
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test2:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,0,3,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm4, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE-NEXT: psubd %xmm3, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm3, %xmm0
+; SSE-NEXT: psrld $2, %xmm0
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm4, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: psubd %xmm2, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: paddd %xmm2, %xmm1
+; SSE-NEXT: psrld $2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test2:
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpalignr {{.*#+}} ymm2 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
+; AVX-NEXT: vpalignr {{.*#+}} ymm3 = ymm0[4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,20,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19]
+; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
+; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm1
+; AVX-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
+; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsrld $2, %ymm0, %ymm0
+; AVX-NEXT: retq
%div = udiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
ret <8 x i32> %div
-
-; AVX-LABEL: test2:
-; AVX: vpbroadcastd
-; AVX: vpalignr $4
-; AVX: vpmuludq
-; AVX: vpmuludq
-; AVX: vpblendd $170
-; AVX: vpsubd
-; AVX: vpsrld $1
-; AVX: vpadd
-; AVX: vpsrld $2
}
define <8 x i16> @test3(<8 x i16> %a) {
- %div = udiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
- ret <8 x i16> %div
-
; SSE41-LABEL: test3:
-; SSE41: pmulhuw
-; SSE41: psubw
-; SSE41: psrlw $1
-; SSE41: paddw
-; SSE41: psrlw $2
-
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
+; SSE41-NEXT: pmulhuw %xmm0, %xmm1
+; SSE41-NEXT: psubw %xmm1, %xmm0
+; SSE41-NEXT: psrlw $1, %xmm0
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: psrlw $2, %xmm0
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test3:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
+; SSE-NEXT: pmulhuw %xmm0, %xmm1
+; SSE-NEXT: psubw %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm1, %xmm0
+; SSE-NEXT: psrlw $2, %xmm0
+; SSE-NEXT: retq
+;
; AVX-LABEL: test3:
-; AVX: vpmulhuw
-; AVX: vpsubw
-; AVX: vpsrlw $1
-; AVX: vpaddw
-; AVX: vpsrlw $2
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %div = udiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %div
}
define <16 x i16> @test4(<16 x i16> %a) {
+; SSE41-LABEL: test4:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pmulhuw %xmm2, %xmm3
+; SSE41-NEXT: psubw %xmm3, %xmm0
+; SSE41-NEXT: psrlw $1, %xmm0
+; SSE41-NEXT: paddw %xmm3, %xmm0
+; SSE41-NEXT: psrlw $2, %xmm0
+; SSE41-NEXT: pmulhuw %xmm1, %xmm2
+; SSE41-NEXT: psubw %xmm2, %xmm1
+; SSE41-NEXT: psrlw $1, %xmm1
+; SSE41-NEXT: paddw %xmm2, %xmm1
+; SSE41-NEXT: psrlw $2, %xmm1
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test4:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmulhuw %xmm2, %xmm3
+; SSE-NEXT: psubw %xmm3, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm3, %xmm0
+; SSE-NEXT: psrlw $2, %xmm0
+; SSE-NEXT: pmulhuw %xmm1, %xmm2
+; SSE-NEXT: psubw %xmm2, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: paddw %xmm2, %xmm1
+; SSE-NEXT: psrlw $2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test4:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhuw {{.*}}(%rip), %ymm0, %ymm1
+; AVX-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vpsrlw $2, %ymm0, %ymm0
+; AVX-NEXT: retq
%div = udiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7>
ret <16 x i16> %div
-
-; AVX-LABEL: test4:
-; AVX: vpmulhuw
-; AVX: vpsubw
-; AVX: vpsrlw $1
-; AVX: vpaddw
-; AVX: vpsrlw $2
-; AVX-NOT: vpmulhuw
}
define <8 x i16> @test5(<8 x i16> %a) {
- %div = sdiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
- ret <8 x i16> %div
-
; SSE41-LABEL: test5:
-; SSE41: pmulhw
-; SSE41: psrlw $15
-; SSE41: psraw $1
-; SSE41: paddw
-
+; SSE41: # BB#0:
+; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $15, %xmm1
+; SSE41-NEXT: psraw $1, %xmm0
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test5:
+; SSE: # BB#0:
+; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrlw $15, %xmm1
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
; AVX-LABEL: test5:
-; AVX: vpmulhw
-; AVX: vpsrlw $15
-; AVX: vpsraw $1
-; AVX: vpaddw
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $15, %xmm0, %xmm1
+; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %div = sdiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+ ret <8 x i16> %div
}
define <16 x i16> @test6(<16 x i16> %a) {
+; SSE41-LABEL: test6:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725]
+; SSE41-NEXT: pmulhw %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrlw $15, %xmm3
+; SSE41-NEXT: psraw $1, %xmm0
+; SSE41-NEXT: paddw %xmm3, %xmm0
+; SSE41-NEXT: pmulhw %xmm2, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrlw $15, %xmm2
+; SSE41-NEXT: psraw $1, %xmm1
+; SSE41-NEXT: paddw %xmm2, %xmm1
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test6:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725]
+; SSE-NEXT: pmulhw %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrlw $15, %xmm3
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm3, %xmm0
+; SSE-NEXT: pmulhw %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrlw $15, %xmm2
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: paddw %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test6:
+; AVX: # BB#0:
+; AVX-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: vpsrlw $15, %ymm0, %ymm1
+; AVX-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
%div = sdiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7>
ret <16 x i16> %div
-
-; AVX-LABEL: test6:
-; AVX: vpmulhw
-; AVX: vpsrlw $15
-; AVX: vpsraw $1
-; AVX: vpaddw
-; AVX-NOT: vpmulhw
}
define <16 x i8> @test7(<16 x i8> %a) {
- %div = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
- ret <16 x i8> %div
-
-; FIXME: scalarized
; SSE41-LABEL: test7:
-; SSE41: pext
+; SSE41: # BB#0:
+; SSE41-NEXT: pextrb $1, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pextrb $0, %xmm0, %ecx
+; SSE41-NEXT: movsbl %cl, %ecx
+; SSE41-NEXT: imull $-109, %ecx, %edx
+; SSE41-NEXT: shrl $8, %edx
+; SSE41-NEXT: addb %dl, %cl
+; SSE41-NEXT: movb %cl, %dl
+; SSE41-NEXT: shrb $7, %dl
+; SSE41-NEXT: sarb $2, %cl
+; SSE41-NEXT: addb %dl, %cl
+; SSE41-NEXT: movzbl %cl, %ecx
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrb $1, %eax, %xmm1
+; SSE41-NEXT: pextrb $2, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm1
+; SSE41-NEXT: pextrb $3, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm1
+; SSE41-NEXT: pextrb $4, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm1
+; SSE41-NEXT: pextrb $5, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm1
+; SSE41-NEXT: pextrb $6, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm1
+; SSE41-NEXT: pextrb $7, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm1
+; SSE41-NEXT: pextrb $8, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm1
+; SSE41-NEXT: pextrb $9, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm1
+; SSE41-NEXT: pextrb $10, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm1
+; SSE41-NEXT: pextrb $11, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm1
+; SSE41-NEXT: pextrb $12, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm1
+; SSE41-NEXT: pextrb $13, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm1
+; SSE41-NEXT: pextrb $14, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm1
+; SSE41-NEXT: pextrb $15, %xmm0, %eax
+; SSE41-NEXT: movsbl %al, %eax
+; SSE41-NEXT: imull $-109, %eax, %ecx
+; SSE41-NEXT: shrl $8, %ecx
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movb %al, %cl
+; SSE41-NEXT: shrb $7, %cl
+; SSE41-NEXT: sarb $2, %al
+; SSE41-NEXT: addb %cl, %al
+; SSE41-NEXT: movzbl %al, %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test7:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm4
+; SSE-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: imull $-109, %eax, %ecx
+; SSE-NEXT: shrl $8, %ecx
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movb %cl, %al
+; SSE-NEXT: shrb $7, %al
+; SSE-NEXT: sarb $2, %cl
+; SSE-NEXT: addb %al, %cl
+; SSE-NEXT: movzbl %cl, %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT: retq
+;
; AVX-LABEL: test7:
-; AVX: pext
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $1, %xmm0, %eax
+; AVX-NEXT: movsbl %al, %eax
+; AVX-NEXT: imull $-109, %eax, %ecx
+; AVX-NEXT: shrl $8, %ecx
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movb %al, %cl
+; AVX-NEXT: shrb $7, %cl
+; AVX-NEXT: sarb $2, %al
+; AVX-NEXT: addb %cl, %al
+; AVX-NEXT: movzbl %al, %eax
+; AVX-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %dl
+; AVX-NEXT: shrb $7, %dl
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movzbl %cl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm1
+; AVX-NEXT: vpextrb $2, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $3, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $5, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $7, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $9, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $10, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $11, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $13, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $14, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpextrb $15, %xmm0, %ecx
+; AVX-NEXT: movsbl %cl, %ecx
+; AVX-NEXT: imull $-109, %ecx, %edx
+; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm0
+; AVX-NEXT: shrl $8, %edx
+; AVX-NEXT: addb %dl, %cl
+; AVX-NEXT: movb %cl, %al
+; AVX-NEXT: shrb $7, %al
+; AVX-NEXT: sarb $2, %cl
+; AVX-NEXT: addb %al, %cl
+; AVX-NEXT: movzbl %cl, %eax
+; AVX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %div = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %div
}
define <4 x i32> @test8(<4 x i32> %a) {
- %div = sdiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
- ret <4 x i32> %div
-
; SSE41-LABEL: test8:
-; SSE41: pmuldq
-; SSE41: pshufd $49
-; SSE41: pshufd $49
-; SSE41: pmuldq
-; SSE41: shufps $-35
-; SSE41: pshufd $-40
-; SSE41: padd
-; SSE41: psrld $31
-; SSE41: psrad $2
-; SSE41: padd
-
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pmuldq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,0,3,0]
+; SSE41-NEXT: pmuldq %xmm1, %xmm3
+; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,1,3]
+; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: psrad $2, %xmm1
+; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
; SSE-LABEL: test8:
-; SSE: pmuludq
-; SSE: pshufd $49
-; SSE: pshufd $49
-; SSE: pmuludq
-; SSE: shufps $-35
-; SSE: pshufd $-40
-; SSE: psubd
-; SSE: padd
-; SSE: psrld $31
-; SSE: psrad $2
-; SSE: padd
-
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrad $31, %xmm3
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: paddd %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm1, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,1,3]
+; SSE-NEXT: psubd %xmm3, %xmm1
+; SSE-NEXT: paddd %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrld $31, %xmm0
+; SSE-NEXT: psrad $2, %xmm1
+; SSE-NEXT: paddd %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
; AVX-LABEL: test8:
-; AVX: vpmuldq
-; AVX: vpshufd $49
-; AVX: vpshufd $49
-; AVX: vpmuldq
-; AVX: vshufps $-35
-; AVX: vpshufd $-40
-; AVX: vpadd
-; AVX: vpsrld $31
-; AVX: vpsrad $2
-; AVX: vpadd
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,0,3,0]
+; AVX-NEXT: vpmuldq %xmm1, %xmm3, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrld $31, %xmm0, %xmm1
+; AVX-NEXT: vpsrad $2, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %div = sdiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7>
+ ret <4 x i32> %div
}
define <8 x i32> @test9(<8 x i32> %a) {
+; SSE41-LABEL: test9:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pmuldq %xmm3, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,0,3,0]
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,0,3,0]
+; SSE41-NEXT: pmuldq %xmm4, %xmm5
+; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm5[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE41-NEXT: paddd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: psrad $2, %xmm2
+; SSE41-NEXT: paddd %xmm0, %xmm2
+; SSE41-NEXT: pmuldq %xmm1, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,0]
+; SSE41-NEXT: pmuldq %xmm4, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm0[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE41-NEXT: paddd %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: psrad $2, %xmm3
+; SSE41-NEXT: paddd %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm3, %xmm1
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test9:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2454267027,2454267027,2454267027,2454267027]
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: psrad $31, %xmm4
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: psrad $31, %xmm5
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: paddd %xmm0, %xmm5
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pmuludq %xmm3, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,0,3,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm6, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm7[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE-NEXT: psubd %xmm5, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrld $31, %xmm2
+; SSE-NEXT: psrad $2, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm5
+; SSE-NEXT: psrad $31, %xmm5
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: paddd %xmm4, %xmm5
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm6, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm2[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,1,3]
+; SSE-NEXT: psubd %xmm5, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: psrld $31, %xmm1
+; SSE-NEXT: psrad $2, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test9:
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpalignr {{.*#+}} ymm2 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
+; AVX-NEXT: vpalignr {{.*#+}} ymm3 = ymm0[4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,20,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19]
+; AVX-NEXT: vpmuldq %ymm2, %ymm3, %ymm2
+; AVX-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
+; AVX-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
+; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: vpsrld $31, %ymm0, %ymm1
+; AVX-NEXT: vpsrad $2, %ymm0, %ymm0
+; AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
%div = sdiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
ret <8 x i32> %div
-
-; AVX-LABEL: test9:
-; AVX: vpbroadcastd
-; AVX: vpalignr $4
-; AVX: vpalignr $4
-; AVX: vpmuldq
-; AVX: vpmuldq
-; AVX: vpalignr $4
-; AVX: vpblendd $170
-; AVX: vpadd
-; AVX: vpsrld $31
-; AVX: vpsrad $2
-; AVX: vpadd
}
define <8 x i32> @test10(<8 x i32> %a) {
+; SSE41-LABEL: test10:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,0,3,0]
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,0,3,0]
+; SSE41-NEXT: pmuludq %xmm4, %xmm5
+; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psubd %xmm3, %xmm5
+; SSE41-NEXT: psrld $1, %xmm5
+; SSE41-NEXT: paddd %xmm3, %xmm5
+; SSE41-NEXT: psrld $2, %xmm5
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7]
+; SSE41-NEXT: pmulld %xmm3, %xmm5
+; SSE41-NEXT: psubd %xmm5, %xmm0
+; SSE41-NEXT: pmuludq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,0,3,0]
+; SSE41-NEXT: pmuludq %xmm4, %xmm5
+; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm5[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE41-NEXT: movdqa %xmm1, %xmm4
+; SSE41-NEXT: psubd %xmm2, %xmm4
+; SSE41-NEXT: psrld $1, %xmm4
+; SSE41-NEXT: paddd %xmm2, %xmm4
+; SSE41-NEXT: psrld $2, %xmm4
+; SSE41-NEXT: pmulld %xmm3, %xmm4
+; SSE41-NEXT: psubd %xmm4, %xmm1
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test10:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [613566757,613566757,613566757,613566757]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,0,3,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm4, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: psubd %xmm3, %xmm5
+; SSE-NEXT: psrld $1, %xmm5
+; SSE-NEXT: paddd %xmm3, %xmm5
+; SSE-NEXT: psrld $2, %xmm5
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm3, %xmm5
+; SSE-NEXT: pmuludq %xmm3, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,3]
+; SSE-NEXT: psubd %xmm5, %xmm0
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm4, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm5[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: psubd %xmm2, %xmm4
+; SSE-NEXT: psrld $1, %xmm4
+; SSE-NEXT: paddd %xmm2, %xmm4
+; SSE-NEXT: psrld $2, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm3, %xmm4
+; SSE-NEXT: pmuludq %xmm3, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm2[0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,1,3]
+; SSE-NEXT: psubd %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test10:
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpalignr {{.*#+}} ymm2 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
+; AVX-NEXT: vpalignr {{.*#+}} ymm3 = ymm0[4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,20,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19]
+; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
+; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm1
+; AVX-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
+; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm2
+; AVX-NEXT: vpsrld $1, %ymm2, %ymm2
+; AVX-NEXT: vpaddd %ymm1, %ymm2, %ymm1
+; AVX-NEXT: vpsrld $2, %ymm1, %ymm1
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
%rem = urem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
ret <8 x i32> %rem
-
-; AVX-LABEL: test10:
-; AVX: vpbroadcastd
-; AVX: vpalignr $4
-; AVX: vpmuludq
-; AVX: vpmuludq
-; AVX: vpblendd $170
-; AVX: vpsubd
-; AVX: vpsrld $1
-; AVX: vpadd
-; AVX: vpsrld $2
-; AVX: vpmulld
}
define <8 x i32> @test11(<8 x i32> %a) {
+; SSE41-LABEL: test11:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pmuldq %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,0,3,0]
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,0,3,0]
+; SSE41-NEXT: pmuldq %xmm4, %xmm5
+; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm5[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; SSE41-NEXT: paddd %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm5
+; SSE41-NEXT: psrld $31, %xmm5
+; SSE41-NEXT: psrad $2, %xmm3
+; SSE41-NEXT: paddd %xmm5, %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7]
+; SSE41-NEXT: pmulld %xmm5, %xmm3
+; SSE41-NEXT: psubd %xmm3, %xmm0
+; SSE41-NEXT: pmuldq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,0,3,0]
+; SSE41-NEXT: pmuldq %xmm4, %xmm3
+; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE41-NEXT: paddd %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: psrld $31, %xmm3
+; SSE41-NEXT: psrad $2, %xmm2
+; SSE41-NEXT: paddd %xmm3, %xmm2
+; SSE41-NEXT: pmulld %xmm5, %xmm2
+; SSE41-NEXT: psubd %xmm2, %xmm1
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test11:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: psrad $31, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: psrad $31, %xmm6
+; SSE-NEXT: pand %xmm2, %xmm6
+; SSE-NEXT: paddd %xmm4, %xmm6
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,0,3,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm5, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm7[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,2,1,3]
+; SSE-NEXT: psubd %xmm6, %xmm7
+; SSE-NEXT: paddd %xmm0, %xmm7
+; SSE-NEXT: movdqa %xmm7, %xmm4
+; SSE-NEXT: psrld $31, %xmm4
+; SSE-NEXT: psrad $2, %xmm7
+; SSE-NEXT: paddd %xmm4, %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm4, %xmm7
+; SSE-NEXT: pmuludq %xmm4, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm6[0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,2,1,3]
+; SSE-NEXT: psubd %xmm6, %xmm0
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm6
+; SSE-NEXT: psrad $31, %xmm6
+; SSE-NEXT: pand %xmm2, %xmm6
+; SSE-NEXT: paddd %xmm3, %xmm6
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm5, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: psubd %xmm6, %xmm2
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: psrld $31, %xmm3
+; SSE-NEXT: psrad $2, %xmm2
+; SSE-NEXT: paddd %xmm3, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: pmuludq %xmm4, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; SSE-NEXT: psubd %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test11:
+; AVX: # BB#0:
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpalignr {{.*#+}} ymm2 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
+; AVX-NEXT: vpalignr {{.*#+}} ymm3 = ymm0[4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,20,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19]
+; AVX-NEXT: vpmuldq %ymm2, %ymm3, %ymm2
+; AVX-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
+; AVX-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3],ymm1[20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19]
+; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm1
+; AVX-NEXT: vpsrld $31, %ymm1, %ymm2
+; AVX-NEXT: vpsrad $2, %ymm1, %ymm1
+; AVX-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
+; AVX-NEXT: vpmulld %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
%rem = srem <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7>
ret <8 x i32> %rem
-
-; AVX-LABEL: test11:
-; AVX: vpbroadcastd
-; AVX: vpalignr $4
-; AVX: vpalignr $4
-; AVX: vpmuldq
-; AVX: vpmuldq
-; AVX: vpalignr $4
-; AVX: vpblendd $170
-; AVX: vpadd
-; AVX: vpsrld $31
-; AVX: vpsrad $2
-; AVX: vpadd
-; AVX: vpmulld
}
define <2 x i16> @test12() {
+; SSE41-LABEL: test12:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorps %xmm0, %xmm0
+; SSE41-NEXT: retq
+;
+; SSE-LABEL: test12:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test12:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
%I8 = insertelement <2 x i16> zeroinitializer, i16 -1, i32 0
%I9 = insertelement <2 x i16> %I8, i16 -1, i32 1
%B9 = urem <2 x i16> %I9, %I9
ret <2 x i16> %B9
-
-; AVX-LABEL: test12:
-; AVX: xorps
}
define <4 x i32> @PR20355(<4 x i32> %a) {
+; SSE41-LABEL: PR20355:
+; SSE41: # BB#0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,0,3,0]
+; SSE41-NEXT: pmuldq %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; SSE41-NEXT: pmuldq %xmm2, %xmm1
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $31, %xmm0
+; SSE41-NEXT: paddd %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
; SSE-LABEL: PR20355:
-; SSE: movdqa {{(.*LCPI|__xmm at 55555556555555565555555655555556).*}}, %[[X1:xmm[0-9]+]]
-; SSE-NEXT: movdqa %[[X1]], %[[X2:xmm[0-9]+]]
-; SSE-NEXT: psrad $31, %[[X2]]
-; SSE-NEXT: pand %xmm0, %[[X2]]
-; SSE-NEXT: movdqa %xmm0, %[[X3:xmm[0-9]+]]
-; SSE-NEXT: psrad $31, %[[X3]]
-; SSE-NEXT: pand %[[X1]], %[[X3]]
-; SSE-NEXT: paddd %[[X2]], %[[X3]]
-; SSE-NEXT: pshufd {{.*}} #{{#?}} [[X4:xmm[0-9]+]] = xmm0[1,0,3,0]
-; SSE-NEXT: pmuludq %[[X1]], %xmm0
-; SSE-NEXT: pshufd {{.*}} #{{#?}} [[X1]] = [[X1]][1,0,3,0]
-; SSE-NEXT: pmuludq %[[X4]], %[[X1]]
-; SSE-NEXT: shufps {{.*}} #{{#?}} xmm0 = xmm0[1,3],[[X1]][1,3]
-; SSE-NEXT: pshufd {{.*}} #{{#?}} [[X5:xmm[0-9]+]] = xmm0[0,2,1,3]
-; SSE-NEXT: psubd %[[X3]], %[[X5]]
-; SSE-NEXT: movdqa %[[X5]], %xmm0
-; SSE-NEXT: psrld $31, %xmm0
-; SSE-NEXT: paddd %[[X5]], %xmm0
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1431655766,1431655766,1431655766,1431655766]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrad $31, %xmm3
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: paddd %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; SSE-NEXT: pmuludq %xmm2, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
+; SSE-NEXT: psubd %xmm3, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrld $31, %xmm0
+; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
-; SSE41-LABEL: PR20355:
-; SSE41: movdqa {{(.*LCPI|__xmm at 55555556555555565555555655555556).*}}, %[[X1:xmm[0-9]+]]
-; SSE41-NEXT: pshufd {{.*}} #{{#?}} [[X2:xmm[0-9]+]] = xmm0[1,0,3,0]
-; SSE41-NEXT: pmuldq %[[X1]], %xmm0
-; SSE41-NEXT: pshufd {{.*}} #{{#?}} [[X1]] = [[X1]][1,0,3,0]
-; SSE41-NEXT: pmuldq %[[X2]], %[[X1]]
-; SSE41-NEXT: shufps {{.*}} #{{#?}} xmm0 = xmm0[1,3],[[X1]][1,3]
-; SSE41-NEXT: pshufd {{.*}} #{{#?}} [[X3:xmm[0-9]+]] = xmm0[0,2,1,3]
-; SSE41-NEXT: movdqa %[[X3]], %xmm0
-; SSE41-NEXT: psrld $31, %xmm0
-; SSE41-NEXT: paddd %[[X3]], %xmm0
-; SSE41-NEXT: retq
+; AVX-LABEL: PR20355:
+; AVX: # BB#0: # %entry
+; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,0,3,0]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,0]
+; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[1,3],xmm0[1,3]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX-NEXT: vpsrld $31, %xmm0, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%sdiv = sdiv <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
ret <4 x i32> %sdiv
Modified: llvm/trunk/test/CodeGen/X86/vselect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect.ll?rev=218954&r1=218953&r2=218954&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect.ll Thu Oct 2 20:57:38 2014
@@ -3,270 +3,254 @@
; Verify that we don't emit packed vector shift instructions if the
; condition used by the vector select is a vector of constants.
-
define <4 x float> @test1(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test1:
+; CHECK: # BB#0:
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm1
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT: orps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
-; CHECK-LABEL: test1
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
-
define <4 x float> @test2(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test2:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsd %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 true, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
-; CHECK-LABEL: test2
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
-
define <4 x float> @test3(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test3:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
-; CHECK-LABEL: test3
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
-
define <4 x float> @test4(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test4:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
-; CHECK-LABEL: test4
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: movaps %xmm1, %xmm0
-; CHECK: ret
-
define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test5:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
-; CHECK-LABEL: test5
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
-
define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test6:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps {{.*#+}} xmm1 = [0,65535,0,65535,0,65535,0,65535]
+; CHECK-NEXT: andps %xmm0, %xmm1
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT: orps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> %a, <8 x i16> %a
ret <8 x i16> %1
}
-; CHECK-LABEL: test6
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
-
define <8 x i16> @test7(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test7:
+; CHECK: # BB#0:
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm1
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT: orps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
}
-; CHECK-LABEL: test7
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
-
define <8 x i16> @test8(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test8:
+; CHECK: # BB#0:
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm1
+; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT: orps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
}
-; CHECK-LABEL: test8
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test9:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
}
-; CHECK-LABEL: test9
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: movaps %xmm1, %xmm0
-; CHECK-NEXT: ret
define <8 x i16> @test10(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test10:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
}
-; CHECK-LABEL: test10
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
define <8 x i16> @test11(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test11:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps {{.*#+}} xmm2 = <0,65535,65535,0,u,65535,65535,u>
+; CHECK-NEXT: andps %xmm2, %xmm0
+; CHECK-NEXT: andnps %xmm1, %xmm2
+; CHECK-NEXT: orps %xmm0, %xmm2
+; CHECK-NEXT: movaps %xmm2, %xmm0
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 true, i1 true, i1 false, i1 undef, i1 true, i1 true, i1 undef>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
}
-; CHECK-LABEL: test11
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test12:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 false, i1 undef, i1 false, i1 false, i1 false, i1 false, i1 undef>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
}
-; CHECK-LABEL: test12
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
define <8 x i16> @test13(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test13:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
}
-; CHECK-LABEL: test13
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK: ret
; Fold (vselect (build_vector AllOnes), N1, N2) -> N1
-
define <4 x float> @test14(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test14:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 undef, i1 true, i1 undef>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
-; CHECK-LABEL: test14
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: pcmpeq
-; CHECK: ret
define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test15:
+; CHECK: # BB#0:
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 true, i1 true, i1 undef>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
}
-; CHECK-LABEL: test15
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: pcmpeq
-; CHECK: ret
; Fold (vselect (build_vector AllZeros), N1, N2) -> N2
-
define <4 x float> @test16(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test16:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 undef, i1 false, i1 undef>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
-}
-; CHECK-LABEL: test16
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: ret
+}
define <8 x i16> @test17(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test17:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <8 x i1> <i1 false, i1 false, i1 false, i1 undef, i1 undef, i1 false, i1 false, i1 undef>, <8 x i16> %a, <8 x i16> %b
ret <8 x i16> %1
}
-; CHECK-LABEL: test17
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: ret
define <4 x float> @test18(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test18:
+; CHECK: # BB#0:
+; CHECK-NEXT: movss %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
-; CHECK-LABEL: test18
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: movss
-; CHECK: ret
define <4 x i32> @test19(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test19:
+; CHECK: # BB#0:
+; CHECK-NEXT: movss %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> %a, <4 x i32> %b
ret <4 x i32> %1
}
-; CHECK-LABEL: test19
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: movss
-; CHECK: ret
define <2 x double> @test20(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test20:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %b
ret <2 x double> %1
}
-; CHECK-LABEL: test20
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: movsd
-; CHECK: ret
define <2 x i64> @test21(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test21:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsd %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <2 x i1> <i1 false, i1 true>, <2 x i64> %a, <2 x i64> %b
ret <2 x i64> %1
}
-; CHECK-LABEL: test21
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: movsd
-; CHECK: ret
define <4 x float> @test22(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test22:
+; CHECK: # BB#0:
+; CHECK-NEXT: movss %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x float> %a, <4 x float> %b
ret <4 x float> %1
}
-; CHECK-LABEL: test22
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: movss
-; CHECK: ret
define <4 x i32> @test23(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test23:
+; CHECK: # BB#0:
+; CHECK-NEXT: movss %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %a, <4 x i32> %b
ret <4 x i32> %1
}
-; CHECK-LABEL: test23
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: movss
-; CHECK: ret
define <2 x double> @test24(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test24:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsd %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <2 x i1> <i1 true, i1 false>, <2 x double> %a, <2 x double> %b
ret <2 x double> %1
}
-; CHECK-LABEL: test24
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: movsd
-; CHECK: ret
define <2 x i64> @test25(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test25:
+; CHECK: # BB#0:
+; CHECK-NEXT: movsd %xmm0, %xmm1
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = select <2 x i1> <i1 true, i1 false>, <2 x i64> %a, <2 x i64> %b
ret <2 x i64> %1
}
-; CHECK-LABEL: test25
-; CHECK-NOT: psllw
-; CHECK-NOT: psraw
-; CHECK-NOT: xorps
-; CHECK: movsd
-; CHECK: ret
define <4 x float> @select_of_shuffles_0(<2 x float> %a0, <2 x float> %b0, <2 x float> %a1, <2 x float> %b1) {
-; CHECK-LABEL: select_of_shuffles_0
-; CHECK-DAG: movlhps %xmm2, [[REGA:%xmm[0-9]+]]
-; CHECK-DAG: movlhps %xmm3, [[REGB:%xmm[0-9]+]]
-; CHECK: subps [[REGB]], [[REGA]]
+; CHECK-LABEL: select_of_shuffles_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; CHECK-NEXT: subps %xmm1, %xmm0
+; CHECK-NEXT: retq
%1 = shufflevector <2 x float> %a0, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%2 = shufflevector <2 x float> %a1, <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
%3 = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x float> %2, <4 x float> %1
@@ -277,11 +261,23 @@ define <4 x float> @select_of_shuffles_0
ret <4 x float> %7
}
-; CHECK-LABEL: @select_illegal
-; CHECK: mov
-; CHECK: ret
; PR20677
define <16 x double> @select_illegal(<16 x double> %a, <16 x double> %b) {
+; CHECK-LABEL: select_illegal:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm4
+; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm5
+; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm6
+; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm7
+; CHECK-NEXT: movaps %xmm7, 112(%rdi)
+; CHECK-NEXT: movaps %xmm6, 96(%rdi)
+; CHECK-NEXT: movaps %xmm5, 80(%rdi)
+; CHECK-NEXT: movaps %xmm4, 64(%rdi)
+; CHECK-NEXT: movaps %xmm3, 48(%rdi)
+; CHECK-NEXT: movaps %xmm2, 32(%rdi)
+; CHECK-NEXT: movaps %xmm1, 16(%rdi)
+; CHECK-NEXT: movaps %xmm0, (%rdi)
+; CHECK-NEXT: retq
%sel = select <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <16 x double> %a, <16 x double> %b
ret <16 x double> %sel
}
Modified: llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll?rev=218954&r1=218953&r2=218954&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_shuffle-1.ll Thu Oct 2 20:57:38 2014
@@ -2,42 +2,53 @@
; widening shuffle v3float and then a add
define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
-entry:
; CHECK-LABEL: shuf:
-; CHECK: extractps
-; CHECK: extractps
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: addps %xmm1, %xmm0
+; CHECK-NEXT: extractps $2, %xmm0, 8(%eax)
+; CHECK-NEXT: extractps $1, %xmm0, 4(%eax)
+; CHECK-NEXT: movss %xmm0, (%eax)
+; CHECK-NEXT: retl
+entry:
%x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 1, i32 2>
%val = fadd <3 x float> %x, %src2
store <3 x float> %val, <3 x float>* %dst.addr
ret void
-; CHECK: ret
}
; widening shuffle v3float with a different mask and then a add
define void @shuf2(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
-entry:
; CHECK-LABEL: shuf2:
-; CHECK: extractps
-; CHECK: extractps
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; CHECK-NEXT: addps %xmm1, %xmm0
+; CHECK-NEXT: extractps $2, %xmm0, 8(%eax)
+; CHECK-NEXT: extractps $1, %xmm0, 4(%eax)
+; CHECK-NEXT: movss %xmm0, (%eax)
+; CHECK-NEXT: retl
+entry:
%x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 4, i32 2>
%val = fadd <3 x float> %x, %src2
store <3 x float> %val, <3 x float>* %dst.addr
ret void
-; CHECK: ret
}
; Example of when widening a v3float operation causes the DAG to replace a node
; with the operation that we are currently widening, i.e. when replacing
; opA with opB, the DAG will produce new operations with opA.
define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
-entry:
; CHECK-LABEL: shuf3:
-; CHECK-NOT: movlhps
-; CHECK-NOT: shufps
-; CHECK: pshufd
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,0,0]
+; CHECK-NEXT: movdqa %xmm0, (%eax)
+; CHECK-NEXT: retl
+entry:
%shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
- %tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
+ %tmp25.i.i = shufflevector <4 x float> %shuffle.i.i.i12, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
%tmp1.i.i = shufflevector <3 x float> %tmp25.i.i, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%tmp3.i13 = shufflevector <4 x float> %tmp1.i.i, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2> ; <<3 x float>>
%tmp6.i14 = shufflevector <3 x float> %tmp3.i13, <3 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -45,27 +56,34 @@ entry:
%tmp2.i18 = shufflevector <3 x float> %tmp97.i, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
%t5 = bitcast <4 x float> %tmp2.i18 to <4 x i32>
%shr.i.i19 = lshr <4 x i32> %t5, <i32 19, i32 19, i32 19, i32 19>
- %and.i.i20 = and <4 x i32> %shr.i.i19, <i32 4080, i32 4080, i32 4080, i32 4080>
+ %and.i.i20 = and <4 x i32> %shr.i.i19, <i32 4080, i32 4080, i32 4080, i32 4080>
%shuffle.i.i.i21 = shufflevector <4 x float> %tmp2.i18, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 2, i32 3>
store <4 x float> %shuffle.i.i.i21, <4 x float>* %dst
ret void
-; CHECK: ret
}
; PR10421: make sure we correctly handle extreme widening with CONCAT_VECTORS
define <8 x i8> @shuf4(<4 x i8> %a, <4 x i8> %b) nounwind readnone {
; CHECK-LABEL: shuf4:
-; CHECK-NOT: punpckldq
+; CHECK: # BB#0:
+; CHECK-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,xmm1[4],zero,xmm1[8],zero,xmm1[12],zero
+; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT: por %xmm1, %xmm0
+; CHECK-NEXT: retl
%vshuf = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x i8> %vshuf
-; CHECK: ret
}
; PR11389: another CONCAT_VECTORS case
define void @shuf5(<8 x i8>* %p) nounwind {
; CHECK-LABEL: shuf5:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <4,33,u,u,u,u,u,u>
+; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,2,0,0,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT: movlpd %xmm0, (%eax)
+; CHECK-NEXT: retl
%v = shufflevector <2 x i8> <i8 4, i8 33>, <2 x i8> undef, <8 x i32> <i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
store <8 x i8> %v, <8 x i8>* %p, align 8
ret void
-; CHECK: ret
}