[llvm] 7f0ed92 - [X86][SSE] Add missing USUBSAT test coverage

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Mon Feb 15 03:38:45 PST 2021


Author: Simon Pilgrim
Date: 2021-02-15T11:38:23Z
New Revision: 7f0ed92667249b48e2064c0e67cc256b6141752b

URL: https://github.com/llvm/llvm-project/commit/7f0ed92667249b48e2064c0e67cc256b6141752b
DIFF: https://github.com/llvm/llvm-project/commit/7f0ed92667249b48e2064c0e67cc256b6141752b.diff

LOG: [X86][SSE] Add missing USUBSAT test coverage

Before we start removing combineSubToSubus (PR40111), make sure we actually have test coverage for the SUB(X,TRUNC(UMIN(ZEXT(X),Y))) -> USUBSAT(X,TRUNC(UMIN(Y,C))) patterns
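
For context, the target form can be written directly with the llvm.usub.sat intrinsic. Below is a minimal sketch of the v8i16/v8i32 case (the function name and the explicit C=65535 clamp are illustrative assumptions, not part of this patch): clamping Y into the unsigned i16 range keeps the truncate lossless, which is what makes the saturating subtract equivalent to the sub(x,trunc(umin(zext(x),y))) form exercised by the new tests.

    ; Illustrative sketch only - names and the 65535 splat are chosen here:
    ; usubsat(x, trunc(umin(y, 65535))) == sub(x, trunc(umin(zext(x), y)))
    define <8 x i16> @usubsat_sketch(<8 x i16> %x, <8 x i32> %y) {
      ; clamp %y to the unsigned i16 range so the truncate below is lossless
      %cmp   = icmp ult <8 x i32> %y, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
      %umin  = select <8 x i1> %cmp, <8 x i32> %y, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
      %trunc = trunc <8 x i32> %umin to <8 x i16>
      ; saturating subtract: 0 whenever the clamped %y covers %x
      %sub   = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %trunc)
      ret <8 x i16> %sub
    }
    declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)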

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/psubus.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll
index 3220edb0231c..6a6e33fb025f 100644
--- a/llvm/test/CodeGen/X86/psubus.ll
+++ b/llvm/test/CodeGen/X86/psubus.ll
@@ -2565,3 +2565,663 @@ define i64 @test31(<2 x i64> %x) {
   %ext = extractelement <2 x i64> %bc, i32 0
   ret i64 %ext
 }
+
+; v8i16/v8i32 - sub(x,trunc(umin(zext(x),y)))
+define <8 x i16> @test32(<8 x i16> %a0, <8 x i32> %a1) {
+; SSE2-LABEL: test32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pxor %xmm3, %xmm4
+; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
+; SSE2-NEXT:    movdqa %xmm5, %xmm6
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT:    pand %xmm6, %xmm2
+; SSE2-NEXT:    pxor %xmm4, %xmm6
+; SSE2-NEXT:    por %xmm2, %xmm6
+; SSE2-NEXT:    pslld $16, %xmm6
+; SSE2-NEXT:    psrad $16, %xmm6
+; SSE2-NEXT:    pxor %xmm1, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT:    pxor %xmm5, %xmm4
+; SSE2-NEXT:    pand %xmm1, %xmm5
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    pslld $16, %xmm5
+; SSE2-NEXT:    psrad $16, %xmm5
+; SSE2-NEXT:    packssdw %xmm6, %xmm5
+; SSE2-NEXT:    psubusw %xmm5, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: test32:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSSE3-NEXT:    pxor %xmm3, %xmm4
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
+; SSSE3-NEXT:    movdqa %xmm5, %xmm6
+; SSSE3-NEXT:    pcmpgtd %xmm4, %xmm6
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
+; SSSE3-NEXT:    pand %xmm6, %xmm2
+; SSSE3-NEXT:    pandn %xmm4, %xmm6
+; SSSE3-NEXT:    por %xmm2, %xmm6
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT:    pshufb %xmm2, %xmm6
+; SSSE3-NEXT:    pxor %xmm1, %xmm3
+; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT:    pand %xmm5, %xmm1
+; SSSE3-NEXT:    pandn %xmm4, %xmm5
+; SSSE3-NEXT:    por %xmm1, %xmm5
+; SSSE3-NEXT:    pshufb %xmm2, %xmm5
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
+; SSSE3-NEXT:    psubusw %xmm5, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: test32:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
+; SSE41-NEXT:    pminud %xmm3, %xmm2
+; SSE41-NEXT:    pminud %xmm3, %xmm1
+; SSE41-NEXT:    packusdw %xmm2, %xmm1
+; SSE41-NEXT:    psubusw %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: test32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
+; AVX1-NEXT:    vpminud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpminud %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX2-NEXT:    vpminud %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovusdw %ymm1, %xmm1
+; AVX512-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %zext = zext <8 x i16> %a0 to <8 x i32>
+  %icmp = icmp ult <8 x i32> %zext, %a1
+  %umin = select <8 x i1> %icmp, <8 x i32> %zext, <8 x i32> %a1
+  %trunc = trunc <8 x i32> %umin to <8 x i16>
+  %sub = sub <8 x i16> %a0, %trunc
+  ret <8 x i16> %sub
+}
+
+; v8i32/v8i64 - sub(x,trunc(umin(y,zext(x))))
+define <8 x i32> @test33(<8 x i32> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test33:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor %xmm7, %xmm7
+; SSE2-NEXT:    movdqa %xmm1, %xmm8
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
+; SSE2-NEXT:    movdqa %xmm1, %xmm9
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; SSE2-NEXT:    movdqa %xmm0, %xmm10
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm11
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm7[2],xmm11[3],xmm7[3]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm12 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm3, %xmm6
+; SSE2-NEXT:    pxor %xmm12, %xmm6
+; SSE2-NEXT:    movdqa %xmm11, %xmm7
+; SSE2-NEXT:    pxor %xmm12, %xmm7
+; SSE2-NEXT:    movdqa %xmm7, %xmm13
+; SSE2-NEXT:    pcmpgtd %xmm6, %xmm13
+; SSE2-NEXT:    pshufd {{.*#+}} xmm14 = xmm13[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSE2-NEXT:    pand %xmm14, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm13[1,1,3,3]
+; SSE2-NEXT:    por %xmm6, %xmm7
+; SSE2-NEXT:    pand %xmm7, %xmm3
+; SSE2-NEXT:    pandn %xmm11, %xmm7
+; SSE2-NEXT:    por %xmm3, %xmm7
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm12, %xmm3
+; SSE2-NEXT:    movdqa %xmm10, %xmm6
+; SSE2-NEXT:    pxor %xmm12, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm11
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm11
+; SSE2-NEXT:    pshufd {{.*#+}} xmm13 = xmm11[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm13, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm11[1,1,3,3]
+; SSE2-NEXT:    por %xmm6, %xmm3
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    pandn %xmm10, %xmm3
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
+; SSE2-NEXT:    movdqa %xmm5, %xmm2
+; SSE2-NEXT:    pxor %xmm12, %xmm2
+; SSE2-NEXT:    movdqa %xmm9, %xmm6
+; SSE2-NEXT:    pxor %xmm12, %xmm6
+; SSE2-NEXT:    movdqa %xmm6, %xmm7
+; SSE2-NEXT:    pcmpgtd %xmm2, %xmm7
+; SSE2-NEXT:    pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm10, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm6
+; SSE2-NEXT:    pand %xmm6, %xmm5
+; SSE2-NEXT:    pandn %xmm9, %xmm6
+; SSE2-NEXT:    por %xmm5, %xmm6
+; SSE2-NEXT:    movdqa %xmm4, %xmm2
+; SSE2-NEXT:    pxor %xmm12, %xmm2
+; SSE2-NEXT:    pxor %xmm8, %xmm12
+; SSE2-NEXT:    movdqa %xmm12, %xmm5
+; SSE2-NEXT:    pcmpgtd %xmm2, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm12
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm12[1,1,3,3]
+; SSE2-NEXT:    pand %xmm7, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm5
+; SSE2-NEXT:    pand %xmm5, %xmm4
+; SSE2-NEXT:    pandn %xmm8, %xmm5
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
+; SSE2-NEXT:    psubd %xmm3, %xmm0
+; SSE2-NEXT:    psubd %xmm5, %xmm1
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: test33:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    pxor %xmm7, %xmm7
+; SSSE3-NEXT:    movdqa %xmm1, %xmm8
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm9
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; SSSE3-NEXT:    movdqa %xmm0, %xmm10
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
+; SSSE3-NEXT:    movdqa %xmm0, %xmm11
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm7[2],xmm11[3],xmm7[3]
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm12 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT:    movdqa %xmm3, %xmm6
+; SSSE3-NEXT:    pxor %xmm12, %xmm6
+; SSSE3-NEXT:    movdqa %xmm11, %xmm7
+; SSSE3-NEXT:    pxor %xmm12, %xmm7
+; SSSE3-NEXT:    movdqa %xmm7, %xmm13
+; SSSE3-NEXT:    pcmpgtd %xmm6, %xmm13
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm14 = xmm13[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm6, %xmm7
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm14, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm7 = xmm13[1,1,3,3]
+; SSSE3-NEXT:    por %xmm6, %xmm7
+; SSSE3-NEXT:    pand %xmm7, %xmm3
+; SSSE3-NEXT:    pandn %xmm11, %xmm7
+; SSSE3-NEXT:    por %xmm3, %xmm7
+; SSSE3-NEXT:    movdqa %xmm2, %xmm3
+; SSSE3-NEXT:    pxor %xmm12, %xmm3
+; SSSE3-NEXT:    movdqa %xmm10, %xmm6
+; SSSE3-NEXT:    pxor %xmm12, %xmm6
+; SSSE3-NEXT:    movdqa %xmm6, %xmm11
+; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm11
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm13 = xmm11[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm3, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm13, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm11[1,1,3,3]
+; SSSE3-NEXT:    por %xmm6, %xmm3
+; SSSE3-NEXT:    pand %xmm3, %xmm2
+; SSSE3-NEXT:    pandn %xmm10, %xmm3
+; SSSE3-NEXT:    por %xmm2, %xmm3
+; SSSE3-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
+; SSSE3-NEXT:    movdqa %xmm5, %xmm2
+; SSSE3-NEXT:    pxor %xmm12, %xmm2
+; SSSE3-NEXT:    movdqa %xmm9, %xmm6
+; SSSE3-NEXT:    pxor %xmm12, %xmm6
+; SSSE3-NEXT:    movdqa %xmm6, %xmm7
+; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm7
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm2, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm10, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSSE3-NEXT:    por %xmm2, %xmm6
+; SSSE3-NEXT:    pand %xmm6, %xmm5
+; SSSE3-NEXT:    pandn %xmm9, %xmm6
+; SSSE3-NEXT:    por %xmm5, %xmm6
+; SSSE3-NEXT:    movdqa %xmm4, %xmm2
+; SSSE3-NEXT:    pxor %xmm12, %xmm2
+; SSSE3-NEXT:    pxor %xmm8, %xmm12
+; SSSE3-NEXT:    movdqa %xmm12, %xmm5
+; SSSE3-NEXT:    pcmpgtd %xmm2, %xmm5
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm2, %xmm12
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm12[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm7, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT:    por %xmm2, %xmm5
+; SSSE3-NEXT:    pand %xmm5, %xmm4
+; SSSE3-NEXT:    pandn %xmm8, %xmm5
+; SSSE3-NEXT:    por %xmm4, %xmm5
+; SSSE3-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
+; SSSE3-NEXT:    psubd %xmm3, %xmm0
+; SSSE3-NEXT:    psubd %xmm5, %xmm1
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: test33:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm8
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm10
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; SSE41-NEXT:    movdqa %xmm8, %xmm12
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm9 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm11 = xmm8[0],zero,xmm8[1],zero
+; SSE41-NEXT:    movdqa {{.*#+}} xmm14 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT:    movdqa %xmm3, %xmm13
+; SSE41-NEXT:    pxor %xmm14, %xmm13
+; SSE41-NEXT:    movdqa %xmm12, %xmm6
+; SSE41-NEXT:    pxor %xmm14, %xmm6
+; SSE41-NEXT:    movdqa %xmm6, %xmm0
+; SSE41-NEXT:    pcmpeqd %xmm13, %xmm0
+; SSE41-NEXT:    pcmpgtd %xmm13, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm12
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    pxor %xmm14, %xmm0
+; SSE41-NEXT:    movdqa %xmm11, %xmm3
+; SSE41-NEXT:    pxor %xmm14, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm6
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm11
+; SSE41-NEXT:    shufps {{.*#+}} xmm11 = xmm11[0,2],xmm12[0,2]
+; SSE41-NEXT:    movdqa %xmm5, %xmm0
+; SSE41-NEXT:    pxor %xmm14, %xmm0
+; SSE41-NEXT:    movdqa %xmm10, %xmm2
+; SSE41-NEXT:    pxor %xmm14, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm3
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE41-NEXT:    pand %xmm3, %xmm0
+; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm5, %xmm10
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    pxor %xmm14, %xmm0
+; SSE41-NEXT:    pxor %xmm9, %xmm14
+; SSE41-NEXT:    movdqa %xmm14, %xmm2
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm14
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm14[0,0,2,2]
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    por %xmm14, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm4, %xmm9
+; SSE41-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,2],xmm10[0,2]
+; SSE41-NEXT:    psubd %xmm11, %xmm8
+; SSE41-NEXT:    psubd %xmm9, %xmm1
+; SSE41-NEXT:    movdqa %xmm8, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: test33:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm8 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm12 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm5[0],zero,xmm5[1],zero
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm10, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm7
+; AVX1-NEXT:    vpxor %xmm3, %xmm7, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm8, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm13
+; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm6
+; AVX1-NEXT:    vpor %xmm3, %xmm12, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm14
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT:    vpxor %xmm3, %xmm6, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm9, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm6, %xmm9, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm14, %xmm1, %xmm12, %xmm1
+; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; AVX1-NEXT:    vblendvpd %xmm13, %xmm7, %xmm8, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm11, %xmm2, %xmm10, %xmm2
+; AVX1-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; AVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: test33:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT:    vpminuq %zmm2, %zmm1, %zmm1
+; AVX512-NEXT:    vpmovqd %zmm1, %ymm1
+; AVX512-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %zext = zext <8 x i32> %a0 to <8 x i64>
+  %icmp = icmp ult <8 x i64> %a1, %zext
+  %umin = select <8 x i1> %icmp, <8 x i64> %a1, <8 x i64> %zext
+  %trunc = trunc <8 x i64> %umin to <8 x i32>
+  %sub = sub <8 x i32> %a0, %trunc
+  ret <8 x i32> %sub
+}
+
+; v8i32/v8i64 - sub(x,trunc(umin(zext(and(x,1)),y)))
+define <8 x i32> @test34(<8 x i32> %a0, <8 x i64> %a1) {
+; SSE2-LABEL: test34:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm6 = [1,1,1,1]
+; SSE2-NEXT:    pand %xmm6, %xmm0
+; SSE2-NEXT:    pand %xmm6, %xmm1
+; SSE2-NEXT:    pxor %xmm7, %xmm7
+; SSE2-NEXT:    movdqa %xmm1, %xmm8
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
+; SSE2-NEXT:    movdqa %xmm1, %xmm9
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; SSE2-NEXT:    movdqa %xmm0, %xmm10
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm12
+; SSE2-NEXT:    punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm7[2],xmm12[3],xmm7[3]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm11 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm3, %xmm7
+; SSE2-NEXT:    pxor %xmm11, %xmm7
+; SSE2-NEXT:    movdqa %xmm12, %xmm6
+; SSE2-NEXT:    por %xmm11, %xmm6
+; SSE2-NEXT:    movdqa %xmm7, %xmm13
+; SSE2-NEXT:    pcmpgtd %xmm6, %xmm13
+; SSE2-NEXT:    pshufd {{.*#+}} xmm14 = xmm13[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm7, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm14, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm13[1,1,3,3]
+; SSE2-NEXT:    por %xmm6, %xmm7
+; SSE2-NEXT:    pand %xmm7, %xmm12
+; SSE2-NEXT:    pandn %xmm3, %xmm7
+; SSE2-NEXT:    por %xmm12, %xmm7
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm11, %xmm3
+; SSE2-NEXT:    movdqa %xmm10, %xmm6
+; SSE2-NEXT:    por %xmm11, %xmm6
+; SSE2-NEXT:    movdqa %xmm3, %xmm12
+; SSE2-NEXT:    pcmpgtd %xmm6, %xmm12
+; SSE2-NEXT:    pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm13, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm12[1,1,3,3]
+; SSE2-NEXT:    por %xmm6, %xmm3
+; SSE2-NEXT:    pand %xmm3, %xmm10
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm10, %xmm3
+; SSE2-NEXT:    packuswb %xmm7, %xmm3
+; SSE2-NEXT:    movdqa %xmm5, %xmm2
+; SSE2-NEXT:    pxor %xmm11, %xmm2
+; SSE2-NEXT:    movdqa %xmm9, %xmm6
+; SSE2-NEXT:    por %xmm11, %xmm6
+; SSE2-NEXT:    movdqa %xmm2, %xmm7
+; SSE2-NEXT:    pcmpgtd %xmm6, %xmm7
+; SSE2-NEXT:    pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm10, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm6
+; SSE2-NEXT:    pand %xmm6, %xmm9
+; SSE2-NEXT:    pandn %xmm5, %xmm6
+; SSE2-NEXT:    por %xmm9, %xmm6
+; SSE2-NEXT:    movdqa %xmm4, %xmm2
+; SSE2-NEXT:    pxor %xmm11, %xmm2
+; SSE2-NEXT:    por %xmm8, %xmm11
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    pcmpgtd %xmm11, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm11
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm11[1,1,3,3]
+; SSE2-NEXT:    pand %xmm7, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm5
+; SSE2-NEXT:    pand %xmm5, %xmm8
+; SSE2-NEXT:    pandn %xmm4, %xmm5
+; SSE2-NEXT:    por %xmm8, %xmm5
+; SSE2-NEXT:    packuswb %xmm6, %xmm5
+; SSE2-NEXT:    psubd %xmm3, %xmm0
+; SSE2-NEXT:    psubd %xmm5, %xmm1
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: test34:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm6 = [1,1,1,1]
+; SSSE3-NEXT:    pand %xmm6, %xmm0
+; SSSE3-NEXT:    pand %xmm6, %xmm1
+; SSSE3-NEXT:    pxor %xmm7, %xmm7
+; SSSE3-NEXT:    movdqa %xmm1, %xmm8
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
+; SSSE3-NEXT:    movdqa %xmm1, %xmm9
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; SSSE3-NEXT:    movdqa %xmm0, %xmm10
+; SSSE3-NEXT:    punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
+; SSSE3-NEXT:    movdqa %xmm0, %xmm12
+; SSSE3-NEXT:    punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm7[2],xmm12[3],xmm7[3]
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm11 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT:    movdqa %xmm3, %xmm7
+; SSSE3-NEXT:    pxor %xmm11, %xmm7
+; SSSE3-NEXT:    movdqa %xmm12, %xmm6
+; SSSE3-NEXT:    por %xmm11, %xmm6
+; SSSE3-NEXT:    movdqa %xmm7, %xmm13
+; SSSE3-NEXT:    pcmpgtd %xmm6, %xmm13
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm14 = xmm13[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm7, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm14, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm7 = xmm13[1,1,3,3]
+; SSSE3-NEXT:    por %xmm6, %xmm7
+; SSSE3-NEXT:    pand %xmm7, %xmm12
+; SSSE3-NEXT:    pandn %xmm3, %xmm7
+; SSSE3-NEXT:    por %xmm12, %xmm7
+; SSSE3-NEXT:    movdqa %xmm2, %xmm3
+; SSSE3-NEXT:    pxor %xmm11, %xmm3
+; SSSE3-NEXT:    movdqa %xmm10, %xmm6
+; SSSE3-NEXT:    por %xmm11, %xmm6
+; SSSE3-NEXT:    movdqa %xmm3, %xmm12
+; SSSE3-NEXT:    pcmpgtd %xmm6, %xmm12
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm3, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm13, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm12[1,1,3,3]
+; SSSE3-NEXT:    por %xmm6, %xmm3
+; SSSE3-NEXT:    pand %xmm3, %xmm10
+; SSSE3-NEXT:    pandn %xmm2, %xmm3
+; SSSE3-NEXT:    por %xmm10, %xmm3
+; SSSE3-NEXT:    packuswb %xmm7, %xmm3
+; SSSE3-NEXT:    movdqa %xmm5, %xmm2
+; SSSE3-NEXT:    pxor %xmm11, %xmm2
+; SSSE3-NEXT:    movdqa %xmm9, %xmm6
+; SSSE3-NEXT:    por %xmm11, %xmm6
+; SSSE3-NEXT:    movdqa %xmm2, %xmm7
+; SSSE3-NEXT:    pcmpgtd %xmm6, %xmm7
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm2, %xmm6
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm10, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSSE3-NEXT:    por %xmm2, %xmm6
+; SSSE3-NEXT:    pand %xmm6, %xmm9
+; SSSE3-NEXT:    pandn %xmm5, %xmm6
+; SSSE3-NEXT:    por %xmm9, %xmm6
+; SSSE3-NEXT:    movdqa %xmm4, %xmm2
+; SSSE3-NEXT:    pxor %xmm11, %xmm2
+; SSSE3-NEXT:    por %xmm8, %xmm11
+; SSSE3-NEXT:    movdqa %xmm2, %xmm5
+; SSSE3-NEXT:    pcmpgtd %xmm11, %xmm5
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm2, %xmm11
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm11[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm7, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT:    por %xmm2, %xmm5
+; SSSE3-NEXT:    pand %xmm5, %xmm8
+; SSSE3-NEXT:    pandn %xmm4, %xmm5
+; SSSE3-NEXT:    por %xmm8, %xmm5
+; SSSE3-NEXT:    packuswb %xmm6, %xmm5
+; SSSE3-NEXT:    psubd %xmm3, %xmm0
+; SSSE3-NEXT:    psubd %xmm5, %xmm1
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: test34:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm11
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
+; SSE41-NEXT:    pand %xmm0, %xmm11
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm8 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT:    pxor %xmm0, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm9
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm10 = xmm11[0],zero,xmm11[1],zero
+; SSE41-NEXT:    movdqa %xmm11, %xmm12
+; SSE41-NEXT:    punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT:    movdqa %xmm3, %xmm6
+; SSE41-NEXT:    pxor %xmm7, %xmm6
+; SSE41-NEXT:    movdqa %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    movdqa %xmm6, %xmm13
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm13
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pand %xmm13, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm12, %xmm3
+; SSE41-NEXT:    movdqa %xmm2, %xmm6
+; SSE41-NEXT:    pxor %xmm7, %xmm6
+; SSE41-NEXT:    movdqa %xmm10, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    movdqa %xmm6, %xmm12
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm12
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
+; SSE41-NEXT:    pand %xmm12, %xmm0
+; SSE41-NEXT:    por %xmm6, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm10, %xmm2
+; SSE41-NEXT:    packusdw %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm5, %xmm3
+; SSE41-NEXT:    pxor %xmm7, %xmm3
+; SSE41-NEXT:    movdqa %xmm9, %xmm0
+; SSE41-NEXT:    por %xmm7, %xmm0
+; SSE41-NEXT:    movdqa %xmm3, %xmm6
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm9, %xmm5
+; SSE41-NEXT:    movdqa %xmm4, %xmm3
+; SSE41-NEXT:    pxor %xmm7, %xmm3
+; SSE41-NEXT:    por %xmm8, %xmm7
+; SSE41-NEXT:    movdqa %xmm3, %xmm6
+; SSE41-NEXT:    pcmpeqd %xmm7, %xmm6
+; SSE41-NEXT:    pcmpgtd %xmm7, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm3, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm8, %xmm4
+; SSE41-NEXT:    packusdw %xmm5, %xmm4
+; SSE41-NEXT:    psubd %xmm2, %xmm11
+; SSE41-NEXT:    psubd %xmm4, %xmm1
+; SSE41-NEXT:    movdqa %xmm11, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: test34:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm9 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vunpckhps {{.*#+}} xmm8 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm12 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm10 = xmm5[0],zero,xmm5[1],zero
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpor %xmm3, %xmm10, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm7
+; AVX1-NEXT:    vpxor %xmm3, %xmm7, %xmm4
+; AVX1-NEXT:    vorps %xmm3, %xmm8, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm13
+; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm6
+; AVX1-NEXT:    vpor %xmm3, %xmm12, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm14
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT:    vpxor %xmm3, %xmm6, %xmm4
+; AVX1-NEXT:    vorps %xmm3, %xmm9, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm9, %xmm6, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm14, %xmm12, %xmm1, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm13, %xmm8, %xmm7, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm11, %xmm10, %xmm2, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test34:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1]
+; AVX2-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT:    vpxor %ymm5, %ymm2, %ymm6
+; AVX2-NEXT:    vpor %ymm5, %ymm4, %ymm7
+; AVX2-NEXT:    vpcmpgtq %ymm7, %ymm6, %ymm6
+; AVX2-NEXT:    vblendvpd %ymm6, %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpxor %ymm5, %ymm1, %ymm4
+; AVX2-NEXT:    vpor %ymm5, %ymm3, %ymm5
+; AVX2-NEXT:    vpcmpgtq %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm4, %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpackusdw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: test34:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT:    vpminuq %zmm1, %zmm2, %zmm1
+; AVX512-NEXT:    vpmovqd %zmm1, %ymm1
+; AVX512-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %mask = and <8 x i32> %a0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %zext = zext <8 x i32> %mask to <8 x i64>
+  %icmp = icmp ult <8 x i64> %zext, %a1
+  %umin = select <8 x i1> %icmp, <8 x i64> %zext, <8 x i64> %a1
+  %trunc = trunc <8 x i64> %umin to <8 x i32>
+  %sub = sub <8 x i32> %mask, %trunc
+  ret <8 x i32> %sub
+}
