[llvm] r340774 - [X86] Add test cases to show current codegen of v2i32 div/rem in 32-bit and 64-bit modes

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 27 14:13:07 PDT 2018


Author: ctopper
Date: Mon Aug 27 14:13:07 2018
New Revision: 340774

URL: http://llvm.org/viewvc/llvm-project?rev=340774&view=rev
Log:
[X86] Add test cases to show current codegen of v2i32 div/rem in 32-bit and 64-bit modes

In particular, these tests show that we end up using libcalls in 32-bit mode even for division by a constant.
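
For reference, here is a minimal standalone C sketch (not taken from the LLVM sources) of the reciprocal-multiplication sequences the 64-bit run relies on: a multiply-high by 0x2492492492492493 with a sub/shr/add/shr fixup for unsigned division by 7, the same quotient folded back into a remainder via lea/sub/add, and a signed multiply-high by 0x4924924924924925 plus a sign-bit correction for the signed case. The helper names are made up, and the sketch assumes the GCC/Clang __int128 extension and arithmetic right shifts on signed integers:

/* Illustrative sketch only; helper names and __int128 usage are assumptions,
 * not LLVM code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Unsigned n/7: multiply-high by ceil(2^64/7) = 0x2492492492492493, then the
 * sub/shr/add/shr fixup seen in test_udiv7_v2i32. */
static uint64_t udiv7(uint64_t n) {
  uint64_t hi = (uint64_t)(((unsigned __int128)n * 0x2492492492492493ULL) >> 64); /* mulq: rdx */
  uint64_t t = (n - hi) >> 1;                                                     /* subq, shrq */
  return (t + hi) >> 2;                                                           /* addq, shrq $2 */
}

/* Unsigned n%7 derived from the quotient, r = n - 7*q, computed with an 8*q
 * lea plus sub/add as in test_urem7_v2i32. */
static uint64_t urem7(uint64_t n) {
  uint64_t q = udiv7(n);
  return n - (q * 8 - q); /* leaq (,%rax,8), subq, addq */
}

/* Signed n/7: signed multiply-high by 0x4924924924924925, arithmetic shift
 * right by one, then add the sign bit to round toward zero, as in
 * test_sdiv7_v2i32.  hi >> 1 assumes an arithmetic shift. */
static int64_t sdiv7(int64_t n) {
  int64_t hi = (int64_t)(((__int128)n * 0x4924924924924925LL) >> 64); /* imulq: rdx */
  return (hi >> 1) + (int64_t)((uint64_t)hi >> 63);                   /* sarq, shrq $63, addq */
}

int main(void) {
  for (int64_t n = -100000; n <= 100000; ++n) {
    if (n >= 0) {
      assert(udiv7((uint64_t)n) == (uint64_t)n / 7);
      assert(urem7((uint64_t)n) == (uint64_t)n % 7);
    }
    assert(sdiv7(n) == n / 7);
  }
  puts("reciprocal sequences agree with division by 7 on the tested range");
  return 0;
}

The 32-bit run never reaches this form; as the checks below show, the widened 64-bit elements go through __udivdi3, __umoddi3, __divdi3 and __moddi3 libcalls instead.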

Added:
    llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll

Added: llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll?rev=340774&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll Mon Aug 27 14:13:07 2018
@@ -0,0 +1,624 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X86
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+
+define void @test_udiv7_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
+; X86-LABEL: test_udiv7_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    pxor %xmm1, %xmm1
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    movq %xmm0, %rcx
+; X86-NEXT:    movabsq $2635249153387078803, %rdi # imm = 0x2492492492492493
+; X86-NEXT:    movq %rcx, %rax
+; X86-NEXT:    mulq %rdi
+; X86-NEXT:    subq %rdx, %rcx
+; X86-NEXT:    shrq %rcx
+; X86-NEXT:    addq %rdx, %rcx
+; X86-NEXT:    shrq $2, %rcx
+; X86-NEXT:    movq %rcx, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT:    movq %xmm0, %rcx
+; X86-NEXT:    movq %rcx, %rax
+; X86-NEXT:    mulq %rdi
+; X86-NEXT:    subq %rdx, %rcx
+; X86-NEXT:    shrq %rcx
+; X86-NEXT:    addq %rdx, %rcx
+; X86-NEXT:    shrq $2, %rcx
+; X86-NEXT:    movq %rcx, %xmm0
+; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rsi)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_udiv7_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    pushl %esi
+; X64-NEXT:    subl $40, %esp
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X64-NEXT:    movss %xmm0, (%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $7, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    calll __udivdi3
+; X64-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,0,1]
+; X64-NEXT:    movss %xmm0, (%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $7, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X64-NEXT:    calll __udivdi3
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%esi)
+; X64-NEXT:    addl $40, %esp
+; X64-NEXT:    popl %esi
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = udiv <2 x i32> %a, <i32 7, i32 7>
+  store <2 x i32> %b, <2 x i32>* %y
+  ret void
+}
+
+define void @test_urem7_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
+; X86-LABEL: test_urem7_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    pxor %xmm1, %xmm1
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    movq %xmm0, %rcx
+; X86-NEXT:    movabsq $2635249153387078803, %rdi # imm = 0x2492492492492493
+; X86-NEXT:    movq %rcx, %rax
+; X86-NEXT:    mulq %rdi
+; X86-NEXT:    movq %rcx, %rax
+; X86-NEXT:    subq %rdx, %rax
+; X86-NEXT:    shrq %rax
+; X86-NEXT:    addq %rdx, %rax
+; X86-NEXT:    shrq $2, %rax
+; X86-NEXT:    leaq (,%rax,8), %rdx
+; X86-NEXT:    subq %rdx, %rax
+; X86-NEXT:    addq %rcx, %rax
+; X86-NEXT:    movq %rax, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT:    movq %xmm0, %rcx
+; X86-NEXT:    movq %rcx, %rax
+; X86-NEXT:    mulq %rdi
+; X86-NEXT:    movq %rcx, %rax
+; X86-NEXT:    subq %rdx, %rax
+; X86-NEXT:    shrq %rax
+; X86-NEXT:    addq %rdx, %rax
+; X86-NEXT:    shrq $2, %rax
+; X86-NEXT:    leaq (,%rax,8), %rdx
+; X86-NEXT:    subq %rdx, %rax
+; X86-NEXT:    addq %rcx, %rax
+; X86-NEXT:    movq %rax, %xmm0
+; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rsi)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_urem7_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    pushl %esi
+; X64-NEXT:    subl $40, %esp
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X64-NEXT:    movss %xmm0, (%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $7, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    calll __umoddi3
+; X64-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,0,1]
+; X64-NEXT:    movss %xmm0, (%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $7, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X64-NEXT:    calll __umoddi3
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%esi)
+; X64-NEXT:    addl $40, %esp
+; X64-NEXT:    popl %esi
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = urem <2 x i32> %a, <i32 7, i32 7>
+  store <2 x i32> %b, <2 x i32>* %y
+  ret void
+}
+
+define void @test_sdiv7_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
+; X86-LABEL: test_sdiv7_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movslq (%rdi), %rcx
+; X86-NEXT:    movslq 4(%rdi), %rax
+; X86-NEXT:    movabsq $5270498306774157605, %rdi # imm = 0x4924924924924925
+; X86-NEXT:    imulq %rdi
+; X86-NEXT:    movq %rdx, %rax
+; X86-NEXT:    shrq $63, %rax
+; X86-NEXT:    sarq %rdx
+; X86-NEXT:    addq %rax, %rdx
+; X86-NEXT:    movq %rdx, %xmm0
+; X86-NEXT:    movq %rcx, %rax
+; X86-NEXT:    imulq %rdi
+; X86-NEXT:    movq %rdx, %rax
+; X86-NEXT:    shrq $63, %rax
+; X86-NEXT:    sarq %rdx
+; X86-NEXT:    addq %rax, %rdx
+; X86-NEXT:    movq %rdx, %xmm1
+; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rsi)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_sdiv7_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    pushl %esi
+; X64-NEXT:    subl $16, %esp
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movl (%eax), %edi
+; X64-NEXT:    movl 4(%eax), %eax
+; X64-NEXT:    movl %edi, %ebx
+; X64-NEXT:    sarl $31, %ebx
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    sarl $31, %ecx
+; X64-NEXT:    pushl $0
+; X64-NEXT:    pushl $7
+; X64-NEXT:    pushl %ecx
+; X64-NEXT:    pushl %eax
+; X64-NEXT:    calll __divdi3
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu %xmm0, (%esp) # 16-byte Spill
+; X64-NEXT:    pushl $0
+; X64-NEXT:    pushl $7
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    calll __divdi3
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu (%esp), %xmm1 # 16-byte Reload
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%esi)
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    popl %esi
+; X64-NEXT:    popl %edi
+; X64-NEXT:    popl %ebx
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = sdiv <2 x i32> %a, <i32 7, i32 7>
+  store <2 x i32> %b, <2 x i32>* %y
+  ret void
+}
+
+define void @test_srem7_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
+; X86-LABEL: test_srem7_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movslq (%rdi), %rcx
+; X86-NEXT:    movslq 4(%rdi), %rdi
+; X86-NEXT:    movabsq $5270498306774157605, %r8 # imm = 0x4924924924924925
+; X86-NEXT:    movq %rdi, %rax
+; X86-NEXT:    imulq %r8
+; X86-NEXT:    movq %rdx, %rax
+; X86-NEXT:    shrq $63, %rax
+; X86-NEXT:    sarq %rdx
+; X86-NEXT:    addq %rax, %rdx
+; X86-NEXT:    leaq (,%rdx,8), %rax
+; X86-NEXT:    subq %rax, %rdx
+; X86-NEXT:    addq %rdi, %rdx
+; X86-NEXT:    movq %rdx, %xmm0
+; X86-NEXT:    movq %rcx, %rax
+; X86-NEXT:    imulq %r8
+; X86-NEXT:    movq %rdx, %rax
+; X86-NEXT:    shrq $63, %rax
+; X86-NEXT:    sarq %rdx
+; X86-NEXT:    addq %rax, %rdx
+; X86-NEXT:    leaq (,%rdx,8), %rax
+; X86-NEXT:    subq %rax, %rdx
+; X86-NEXT:    addq %rcx, %rdx
+; X86-NEXT:    movq %rdx, %xmm1
+; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rsi)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_srem7_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    pushl %esi
+; X64-NEXT:    subl $16, %esp
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movl (%eax), %edi
+; X64-NEXT:    movl 4(%eax), %eax
+; X64-NEXT:    movl %edi, %ebx
+; X64-NEXT:    sarl $31, %ebx
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    sarl $31, %ecx
+; X64-NEXT:    pushl $0
+; X64-NEXT:    pushl $7
+; X64-NEXT:    pushl %ecx
+; X64-NEXT:    pushl %eax
+; X64-NEXT:    calll __moddi3
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu %xmm0, (%esp) # 16-byte Spill
+; X64-NEXT:    pushl $0
+; X64-NEXT:    pushl $7
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    calll __moddi3
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu (%esp), %xmm1 # 16-byte Reload
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%esi)
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    popl %esi
+; X64-NEXT:    popl %edi
+; X64-NEXT:    popl %ebx
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = srem <2 x i32> %a, <i32 7, i32 7>
+  store <2 x i32> %b, <2 x i32>* %y
+  ret void
+}
+
+define void @test_udiv_pow2_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
+; X86-LABEL: test_udiv_pow2_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    pxor %xmm1, %xmm1
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    psrlq $3, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rsi)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_udiv_pow2_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    pxor %xmm1, %xmm1
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT:    psrlq $3, %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%eax)
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = udiv <2 x i32> %a, <i32 8, i32 8>
+  store <2 x i32> %b, <2 x i32>* %y
+  ret void
+}
+
+define void @test_urem_pow2_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
+; X86-LABEL: test_urem_pow2_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl (%rdi), %eax
+; X86-NEXT:    movl 4(%rdi), %ecx
+; X86-NEXT:    movq %rcx, %xmm0
+; X86-NEXT:    movq %rax, %xmm1
+; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-NEXT:    pand {{.*}}(%rip), %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rsi)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_urem_pow2_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X64-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%eax)
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = urem <2 x i32> %a, <i32 8, i32 8>
+  store <2 x i32> %b, <2 x i32>* %y
+  ret void
+}
+
+define void @test_sdiv_pow2_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
+; X86-LABEL: test_sdiv_pow2_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    pxor %xmm1, %xmm1
+; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-NEXT:    psrad $31, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    psrlq $31, %xmm0
+; X86-NEXT:    pand {{.*}}(%rip), %xmm0
+; X86-NEXT:    psrlq $29, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
+; X86-NEXT:    psrad $31, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-NEXT:    psrlq $3, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rsi)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_sdiv_pow2_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    pxor %xmm1, %xmm1
+; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64-NEXT:    psrad $31, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; X64-NEXT:    movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
+; X64-NEXT:    movdqa {{.*#+}} xmm3 = [31,0,31,0]
+; X64-NEXT:    movdqa %xmm2, %xmm4
+; X64-NEXT:    psrlq %xmm3, %xmm4
+; X64-NEXT:    movl $31, %ecx
+; X64-NEXT:    movd %ecx, %xmm5
+; X64-NEXT:    psrlq %xmm5, %xmm2
+; X64-NEXT:    movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1]
+; X64-NEXT:    movdqa %xmm1, %xmm4
+; X64-NEXT:    psrlq %xmm3, %xmm4
+; X64-NEXT:    psrlq %xmm5, %xmm1
+; X64-NEXT:    movsd {{.*#+}} xmm1 = xmm4[0],xmm1[1]
+; X64-NEXT:    xorpd %xmm2, %xmm1
+; X64-NEXT:    psubq %xmm2, %xmm1
+; X64-NEXT:    pand {{\.LCPI.*}}, %xmm1
+; X64-NEXT:    psrlq $29, %xmm1
+; X64-NEXT:    paddq %xmm0, %xmm1
+; X64-NEXT:    psllq $32, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; X64-NEXT:    psrad $31, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT:    psrlq $3, %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%eax)
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = sdiv <2 x i32> %a, <i32 8, i32 8>
+  store <2 x i32> %b, <2 x i32>* %y
+  ret void
+}
+
+define void @test_srem_pow2_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
+; X86-LABEL: test_srem_pow2_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    pxor %xmm1, %xmm1
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    psrlq $3, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rsi)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_srem_pow2_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    pxor %xmm1, %xmm1
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT:    psrlq $3, %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%eax)
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = udiv <2 x i32> %a, <i32 8, i32 8>
+  store <2 x i32> %b, <2 x i32>* %y
+  ret void
+}
+
+define void @test_urem_v2i32(<2 x i32>* %x, <2 x i32>* %y, <2 x i32>* %z) nounwind {
+; X86-LABEL: test_urem_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movq %rdx, %rcx
+; X86-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    pxor %xmm1, %xmm1
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    movq %xmm0, %rax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    divq %rax
+; X86-NEXT:    movq %rdx, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT:    movq %xmm0, %rax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    divq %rax
+; X86-NEXT:    movq %rdx, %xmm0
+; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rcx)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_urem_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    pushl %esi
+; X64-NEXT:    subl $40, %esp
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X64-NEXT:    movss %xmm0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movss %xmm0, (%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    calll __umoddi3
+; X64-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,0,1]
+; X64-NEXT:    movss %xmm0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movss %xmm0, (%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movl $0, {{[0-9]+}}(%esp)
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X64-NEXT:    calll __umoddi3
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%esi)
+; X64-NEXT:    addl $40, %esp
+; X64-NEXT:    popl %esi
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = load <2 x i32>, <2 x i32>* %x
+  %c = urem <2 x i32> %a, %b
+  store <2 x i32> %c, <2 x i32>* %z
+  ret void
+}
+
+define void @test_sdiv_v2i32(<2 x i32>* %x, <2 x i32>* %y, <2 x i32>* %z) nounwind {
+; X86-LABEL: test_sdiv_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movq %rdx, %rcx
+; X86-NEXT:    movslq (%rdi), %rsi
+; X86-NEXT:    movslq 4(%rdi), %rdi
+; X86-NEXT:    movq %rdi, %rax
+; X86-NEXT:    cqto
+; X86-NEXT:    idivq %rdi
+; X86-NEXT:    movq %rax, %xmm0
+; X86-NEXT:    movq %rsi, %rax
+; X86-NEXT:    cqto
+; X86-NEXT:    idivq %rsi
+; X86-NEXT:    movq %rax, %xmm1
+; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rcx)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_sdiv_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    pushl %esi
+; X64-NEXT:    subl $16, %esp
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movl (%eax), %edi
+; X64-NEXT:    movl 4(%eax), %eax
+; X64-NEXT:    movl %edi, %ebx
+; X64-NEXT:    sarl $31, %ebx
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    sarl $31, %ecx
+; X64-NEXT:    pushl %ecx
+; X64-NEXT:    pushl %eax
+; X64-NEXT:    pushl %ecx
+; X64-NEXT:    pushl %eax
+; X64-NEXT:    calll __divdi3
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu %xmm0, (%esp) # 16-byte Spill
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    calll __divdi3
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu (%esp), %xmm1 # 16-byte Reload
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%esi)
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    popl %esi
+; X64-NEXT:    popl %edi
+; X64-NEXT:    popl %ebx
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = load <2 x i32>, <2 x i32>* %x
+  %c = sdiv <2 x i32> %a, %b
+  store <2 x i32> %c, <2 x i32>* %z
+  ret void
+}
+
+define void @test_srem_v2i32(<2 x i32>* %x, <2 x i32>* %y, <2 x i32>* %z) nounwind {
+; X86-LABEL: test_srem_v2i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movq %rdx, %rcx
+; X86-NEXT:    movslq (%rdi), %rsi
+; X86-NEXT:    movslq 4(%rdi), %rdi
+; X86-NEXT:    movq %rdi, %rax
+; X86-NEXT:    cqto
+; X86-NEXT:    idivq %rdi
+; X86-NEXT:    movq %rax, %xmm0
+; X86-NEXT:    movq %rsi, %rax
+; X86-NEXT:    cqto
+; X86-NEXT:    idivq %rsi
+; X86-NEXT:    movq %rax, %xmm1
+; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-NEXT:    movq %xmm0, (%rcx)
+; X86-NEXT:    retq
+;
+; X64-LABEL: test_srem_v2i32:
+; X64:       # %bb.0:
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    pushl %esi
+; X64-NEXT:    subl $16, %esp
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X64-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X64-NEXT:    movl (%eax), %edi
+; X64-NEXT:    movl 4(%eax), %eax
+; X64-NEXT:    movl %edi, %ebx
+; X64-NEXT:    sarl $31, %ebx
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    sarl $31, %ecx
+; X64-NEXT:    pushl %ecx
+; X64-NEXT:    pushl %eax
+; X64-NEXT:    pushl %ecx
+; X64-NEXT:    pushl %eax
+; X64-NEXT:    calll __divdi3
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu %xmm0, (%esp) # 16-byte Spill
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    pushl %ebx
+; X64-NEXT:    pushl %edi
+; X64-NEXT:    calll __divdi3
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    movdqu (%esp), %xmm1 # 16-byte Reload
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-NEXT:    movq %xmm0, (%esi)
+; X64-NEXT:    addl $16, %esp
+; X64-NEXT:    popl %esi
+; X64-NEXT:    popl %edi
+; X64-NEXT:    popl %ebx
+; X64-NEXT:    retl
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = load <2 x i32>, <2 x i32>* %x
+  %c = sdiv <2 x i32> %a, %b
+  store <2 x i32> %c, <2 x i32>* %z
+  ret void
+}



