[llvm] r355404 - [X86] Add SADDO/UADDO and SSUBO/USUBO combine tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 5 06:52:43 PST 2019


Author: rksimon
Date: Tue Mar  5 06:52:42 2019
New Revision: 355404

URL: http://llvm.org/viewvc/llvm-project?rev=355404&view=rev
Log:
[X86] Add SADDO/UADDO and SSUBO/USUBO combine tests

Include scalar and vector test variants covering the folds in DAGCombiner (the vector folds aren't currently supported - PR40442)
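
Each test uses the same pattern: call the *.with.overflow intrinsic,
extract both the result value and the overflow bit, and select between
the result and a dummy operand on that bit. A minimal sketch of the
scalar form (the vector tests are analogous; value names here are
illustrative only):

  %r = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a0, i32 0)
  %v = extractvalue {i32, i1} %r, 0
  %o = extractvalue {i32, i1} %r, 1
  %s = select i1 %o, i32 %a1, i32 %v
  ret i32 %s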

Added:
    llvm/trunk/test/CodeGen/X86/combine-addo.ll
    llvm/trunk/test/CodeGen/X86/combine-subo.ll

Added: llvm/trunk/test/CodeGen/X86/combine-addo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-addo.ll?rev=355404&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-addo.ll (added)
+++ llvm/trunk/test/CodeGen/X86/combine-addo.ll Tue Mar  5 06:52:42 2019
@@ -0,0 +1,216 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+
+declare {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+; fold (sadd x, 0) -> x
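+; (x + 0 cannot overflow a signed add, so the result is x and the
+; overflow flag is statically false, e.g. sadd(7, 0) = {7, false})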
+define i32 @combine_sadd_zero(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_sadd_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    addl $0, %eax
+; SSE-NEXT:    cmovol %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_sadd_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    addl $0, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_sadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_sadd_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pxor %xmm0, %xmm0
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm3
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE-NEXT:    pandn %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_sadd_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm0, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpandn %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
+
+; fold (uadd x, 0) -> x
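+; (x + 0 never carries, so the result is x and the carry flag is
+; statically false, e.g. uadd(0xffffffff, 0) = {0xffffffff, false})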
+define i32 @combine_uadd_zero(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_uadd_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_uadd_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_uadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_uadd_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pmaxud %xmm0, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_uadd_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmaxud %xmm0, %xmm0, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
+
+; fold (uadd (xor a, -1), 1) -> (usub 0, a) and flip carry
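+; (~a + 1 == 0 - a in two's complement; the add carries only when
+; ~a == 0xffffffff, i.e. a == 0, which is exactly when the sub does not
+; borrow, e.g. a = 5: ~5 + 1 = {-5, carry=0} but 0 - 5 = {-5, borrow=1})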
+define i32 @combine_uadd_not(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_uadd_not:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    negl %eax
+; SSE-NEXT:    cmovael %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_uadd_not:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    negl %eax
+; AVX-NEXT:    cmovael %esi, %eax
+; AVX-NEXT:    retq
+  %1 = xor i32 %a0, -1
+  %2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 1)
+  %3 = extractvalue {i32, i1} %2, 0
+  %4 = extractvalue {i32, i1} %2, 1
+  %5 = select i1 %4, i32 %a1, i32 %3
+  ret i32 %5
+}
+
+define <4 x i32> @combine_vec_uadd_not(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_uadd_not:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,1,4294967295]
+; SSE-NEXT:    paddd %xmm0, %xmm2
+; SSE-NEXT:    pmaxud %xmm2, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_uadd_not:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm3
+; AVX-NEXT:    vpmaxud %xmm0, %xmm3, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm3, %xmm0
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm3, %xmm0
+; AVX-NEXT:    retq
+  %1 = xor <4 x i32> %a0, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %2 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 -1>)
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %2, 0
+  %4 = extractvalue {<4 x i32>, <4 x i1>} %2, 1
+  %5 = select <4 x i1> %4, <4 x i32> %a1, <4 x i32> %3
+  ret <4 x i32> %5
+}
+
+; if uaddo never overflows, replace with add
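+; (both operands are lshr'd by 16, so each is at most 0xffff and the
+; sum is at most 0x1fffe, which always fits in 32 bits)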
+define i32 @combine_uadd_no_overflow(i32 %a0, i32 %a1, i32 %a2) {
+; SSE-LABEL: combine_uadd_no_overflow:
+; SSE:       # %bb.0:
+; SSE-NEXT:    # kill: def $edx killed $edx def $rdx
+; SSE-NEXT:    # kill: def $esi killed $esi def $rsi
+; SSE-NEXT:    shrl $16, %esi
+; SSE-NEXT:    shrl $16, %edx
+; SSE-NEXT:    leal (%rdx,%rsi), %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_uadd_no_overflow:
+; AVX:       # %bb.0:
+; AVX-NEXT:    # kill: def $edx killed $edx def $rdx
+; AVX-NEXT:    # kill: def $esi killed $esi def $rsi
+; AVX-NEXT:    shrl $16, %esi
+; AVX-NEXT:    shrl $16, %edx
+; AVX-NEXT:    leal (%rdx,%rsi), %eax
+; AVX-NEXT:    retq
+  %1 = lshr i32 %a1, 16
+  %2 = lshr i32 %a2, 16
+  %3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 %2)
+  %4 = extractvalue {i32, i1} %3, 0
+  %5 = extractvalue {i32, i1} %3, 1
+  %6 = select i1 %5, i32 %a2, i32 %4
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_uadd_no_overflow(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+; SSE-LABEL: combine_vec_uadd_no_overflow:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    psrld $16, %xmm1
+; SSE-NEXT:    psrld $16, %xmm0
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_uadd_no_overflow:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld $16, %xmm1, %xmm0
+; AVX-NEXT:    vpsrld $16, %xmm2, %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = lshr <4 x i32> %a1, <i32 16, i32 16, i32 16, i32 16>
+  %2 = lshr <4 x i32> %a2, <i32 16, i32 16, i32 16, i32 16>
+  %3 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %1, <4 x i32> %2)
+  %4 = extractvalue {<4 x i32>, <4 x i1>} %3, 0
+  %5 = extractvalue {<4 x i32>, <4 x i1>} %3, 1
+  %6 = select <4 x i1> %5, <4 x i32> %a2, <4 x i32> %4
+  ret <4 x i32> %4
+}

Added: llvm/trunk/test/CodeGen/X86/combine-subo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-subo.ll?rev=355404&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-subo.ll (added)
+++ llvm/trunk/test/CodeGen/X86/combine-subo.ll Tue Mar  5 06:52:42 2019
@@ -0,0 +1,248 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+
+declare {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+; fold (ssub x, 0) -> x
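+; (x - 0 cannot overflow a signed sub, so the result is x and the
+; overflow flag is statically false, e.g. ssub(INT_MIN, 0) = {INT_MIN, false})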
+define i32 @combine_ssub_zero(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_ssub_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    subl $0, %eax
+; SSE-NEXT:    cmovol %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_ssub_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    subl $0, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a0, i32 zeroinitializer)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_ssub_zero(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_ssub_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pxor %xmm3, %xmm3
+; SSE-NEXT:    pcmpgtd %xmm0, %xmm3
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pxor %xmm4, %xmm3
+; SSE-NEXT:    movdqa %xmm3, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm4, %xmm3
+; SSE-NEXT:    pandn %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_ssub_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm0, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm4
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpandn %xmm2, %xmm4, %xmm2
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
+
+; fold (usub x, 0) -> x
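+; (x - 0 never borrows, so the result is x and the borrow flag is
+; statically false, e.g. usub(0, 0) = {0, false})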
+define i32 @combine_usub_zero(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_usub_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_usub_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a0, i32 zeroinitializer)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_usub_zero(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_usub_zero:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pminud %xmm0, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_usub_zero:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminud %xmm0, %xmm0, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
+
+; fold (ssub x, x) -> 0
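+; (x - x is always 0 and cannot signed-overflow,
+; e.g. ssub(INT_MIN, INT_MIN) = {0, false})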
+define i32 @combine_ssub_self(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_ssub_self:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    subl %edi, %eax
+; SSE-NEXT:    cmovol %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_ssub_self:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    subl %edi, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a0, i32 %a0)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_ssub_self(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_ssub_self:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_ssub_self:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a0)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
+
+; fold (usub x, x) -> 0
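+; (x - x is always 0 and never borrows, e.g. usub(7, 7) = {0, false})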
+define i32 @combine_usub_self(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_usub_self:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorl %eax, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_usub_self:
+; AVX:       # %bb.0:
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a0, i32 %a0)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_usub_self(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_usub_self:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psubd %xmm0, %xmm2
+; SSE-NEXT:    pminud %xmm2, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_usub_self:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubd %xmm0, %xmm0, %xmm2
+; AVX-NEXT:    vpminud %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a0)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
+
+; fold (usub -1, x) -> (xor x, -1) + no borrow
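+; (0xffffffff - x flips every bit of x, so it equals ~x, and since
+; x <= 0xffffffff the sub can never borrow, e.g. x = 5:
+; 0xffffffff - 5 = 0xfffffffa = ~5)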
+define i32 @combine_usub_negone(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_usub_negone:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    notl %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_usub_negone:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    notl %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.usub.with.overflow.i32(i32 -1, i32 %a0)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_usub_negone(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_usub_negone:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    pminud %xmm3, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_usub_negone:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpminud %xmm2, %xmm0, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpxor %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a0)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
