[llvm] r355407 - [X86] Add SMULO/UMULO combine tests

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Tue Mar 5 07:36:45 PST 2019


Author: rksimon
Date: Tue Mar  5 07:36:45 2019
New Revision: 355407

URL: http://llvm.org/viewvc/llvm-project?rev=355407&view=rev
Log:
[X86] Add SMULO/UMULO combine tests

Include scalar and vector test variants covering the folds in DAGCombiner (the vector folds aren't currently supported - see PR40442)
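
For reference, the scalar fold exercised here rewrites multiply-with-overflow by 2 into add-with-overflow of the value with itself, which is sound because x * 2 and x + x produce the same result and overflow under exactly the same conditions. A minimal sketch of the equivalent rewrite at the IR level (the actual fold operates on SMULO/SADDO SelectionDAG nodes; the standard overflow intrinsics below are used purely for illustration):

  ; before: multiply-with-overflow by the constant 2
  %m = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %x, i32 2)

  ; after the fold: add-with-overflow of x with itself
  %m = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %x)

The umul.with.overflow/uadd.with.overflow pair follows the same pattern, which is what produces the addl + cmovol/cmovbl sequences in the scalar CHECK lines below.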

Added:
    llvm/trunk/test/CodeGen/X86/combine-mulo.ll

Added: llvm/trunk/test/CodeGen/X86/combine-mulo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-mulo.ll?rev=355407&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-mulo.ll (added)
+++ llvm/trunk/test/CodeGen/X86/combine-mulo.ll Tue Mar  5 07:36:45 2019
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+
+declare {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+; fold (smulo x, 2) -> (saddo x, x)
+define i32 @combine_smul_two(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_smul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    addl %edi, %eax
+; SSE-NEXT:    cmovol %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_smul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    addl %edi, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %a0, i32 2)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_smul_two(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_smul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2,2,2]
+; SSE-NEXT:    pmuldq %xmm3, %xmm0
+; SSE-NEXT:    pmuldq %xmm2, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE-NEXT:    paddd %xmm2, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_smul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2,2,2,2]
+; AVX-NEXT:    vpmuldq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpmuldq %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
+
+; fold (umulo x, 2) -> (uaddo x, x)
+define i32 @combine_umul_two(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_umul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    addl %edi, %eax
+; SSE-NEXT:    cmovbl %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_umul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    addl %edi, %eax
+; AVX-NEXT:    cmovbl %esi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %a0, i32 2)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_umul_two(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_umul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2,2,2]
+; SSE-NEXT:    pmuludq %xmm3, %xmm0
+; SSE-NEXT:    pmuludq %xmm2, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE-NEXT:    pxor %xmm4, %xmm4
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm4
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE-NEXT:    pxor %xmm4, %xmm0
+; SSE-NEXT:    paddd %xmm2, %xmm2
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_umul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2,2,2,2]
+; AVX-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
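
A quick hand-worked check of the fold's soundness (illustrative, not part of the patch): for i32 x = 0x40000000, both x * 2 and x + x give 0x80000000, which overflows signed arithmetic (the cmovol path) but not unsigned; for x = 0x80000000, both wrap to 0 and set the unsigned carry (the cmovbl path). The vector variants above still lower through the generic pmuldq/pmuludq expansion because the SADDO/UADDO fold isn't implemented for vectors yet, as noted in PR40442.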