[llvm] r341108 - [X86] Add -x86-experimental-vector-widening-legalization command lines to vector-idiv-v2i32.ll
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 30 13:10:10 PDT 2018
Author: ctopper
Date: Thu Aug 30 13:10:10 2018
New Revision: 341108
URL: http://llvm.org/viewvc/llvm-project?rev=341108&view=rev
Log:
[X86] Add -x86-experimental-vector-widening-legalization command lines to vector-idiv-v2i32.ll
If we're legalizing via widening already, then the type legalizer will scalarize the divs/rems as i32.
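For context, each function in this test loads a <2 x i32>, performs an element-wise div/rem, and stores the result; under the new flag the type legalizer turns that into two ordinary scalar i32 operations, which is what the added X64_WIDEN/X86_WIDEN check lines capture. A minimal sketch of the pattern being exercised, taken from the first test and the new RUN line in the diff below:

  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -x86-experimental-vector-widening-legalization
  %a = load <2 x i32>, <2 x i32>* %x
  %b = udiv <2 x i32> %a, <i32 7, i32 7>  ; scalarized into two i32 divides under the flag
  store <2 x i32> %b, <2 x i32>* %y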
Modified:
llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll?rev=341108&r1=341107&r2=341108&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-v2i32.ll Thu Aug 30 13:10:10 2018
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X64
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=X64_WIDEN
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=X86_WIDEN
define void @test_udiv7_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
; X64-LABEL: test_udiv7_v2i32:
@@ -61,6 +63,59 @@ define void @test_udiv7_v2i32(<2 x i32>*
; X86-NEXT: addl $40, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_udiv7_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
+; X64_WIDEN-NEXT: shrq $32, %rcx
+; X64_WIDEN-NEXT: subl %ecx, %eax
+; X64_WIDEN-NEXT: shrl %eax
+; X64_WIDEN-NEXT: addl %ecx, %eax
+; X64_WIDEN-NEXT: shrl $2, %eax
+; X64_WIDEN-NEXT: movd %eax, %xmm1
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
+; X64_WIDEN-NEXT: shrq $32, %rcx
+; X64_WIDEN-NEXT: subl %ecx, %eax
+; X64_WIDEN-NEXT: shrl %eax
+; X64_WIDEN-NEXT: addl %ecx, %eax
+; X64_WIDEN-NEXT: shrl $2, %eax
+; X64_WIDEN-NEXT: movd %eax, %xmm0
+; X64_WIDEN-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64_WIDEN-NEXT: movq %xmm1, (%rsi)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_udiv7_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: pushl %ebx
+; X86_WIDEN-NEXT: pushl %edi
+; X86_WIDEN-NEXT: pushl %esi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86_WIDEN-NEXT: movl (%eax), %ecx
+; X86_WIDEN-NEXT: movl 4(%eax), %esi
+; X86_WIDEN-NEXT: movl $613566757, %ebx # imm = 0x24924925
+; X86_WIDEN-NEXT: movl %ecx, %eax
+; X86_WIDEN-NEXT: mull %ebx
+; X86_WIDEN-NEXT: subl %edx, %ecx
+; X86_WIDEN-NEXT: shrl %ecx
+; X86_WIDEN-NEXT: addl %edx, %ecx
+; X86_WIDEN-NEXT: shrl $2, %ecx
+; X86_WIDEN-NEXT: movl %esi, %eax
+; X86_WIDEN-NEXT: mull %ebx
+; X86_WIDEN-NEXT: subl %edx, %esi
+; X86_WIDEN-NEXT: shrl %esi
+; X86_WIDEN-NEXT: addl %edx, %esi
+; X86_WIDEN-NEXT: shrl $2, %esi
+; X86_WIDEN-NEXT: movl %esi, 4(%edi)
+; X86_WIDEN-NEXT: movl %ecx, (%edi)
+; X86_WIDEN-NEXT: popl %esi
+; X86_WIDEN-NEXT: popl %edi
+; X86_WIDEN-NEXT: popl %ebx
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = udiv <2 x i32> %a, <i32 7, i32 7>
store <2 x i32> %b, <2 x i32>* %y
@@ -134,6 +189,77 @@ define void @test_urem7_v2i32(<2 x i32>*
; X86-NEXT: addl $40, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_urem7_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
+; X64_WIDEN-NEXT: shrq $32, %rcx
+; X64_WIDEN-NEXT: movl %eax, %edx
+; X64_WIDEN-NEXT: subl %ecx, %edx
+; X64_WIDEN-NEXT: shrl %edx
+; X64_WIDEN-NEXT: addl %ecx, %edx
+; X64_WIDEN-NEXT: shrl $2, %edx
+; X64_WIDEN-NEXT: leal (,%rdx,8), %ecx
+; X64_WIDEN-NEXT: subl %ecx, %edx
+; X64_WIDEN-NEXT: addl %eax, %edx
+; X64_WIDEN-NEXT: movd %edx, %xmm1
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
+; X64_WIDEN-NEXT: shrq $32, %rcx
+; X64_WIDEN-NEXT: movl %eax, %edx
+; X64_WIDEN-NEXT: subl %ecx, %edx
+; X64_WIDEN-NEXT: shrl %edx
+; X64_WIDEN-NEXT: addl %ecx, %edx
+; X64_WIDEN-NEXT: shrl $2, %edx
+; X64_WIDEN-NEXT: leal (,%rdx,8), %ecx
+; X64_WIDEN-NEXT: subl %ecx, %edx
+; X64_WIDEN-NEXT: addl %eax, %edx
+; X64_WIDEN-NEXT: movd %edx, %xmm0
+; X64_WIDEN-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64_WIDEN-NEXT: movq %xmm1, (%rsi)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_urem7_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: pushl %ebp
+; X86_WIDEN-NEXT: pushl %ebx
+; X86_WIDEN-NEXT: pushl %edi
+; X86_WIDEN-NEXT: pushl %esi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86_WIDEN-NEXT: movl (%eax), %esi
+; X86_WIDEN-NEXT: movl 4(%eax), %ecx
+; X86_WIDEN-NEXT: movl $613566757, %ebx # imm = 0x24924925
+; X86_WIDEN-NEXT: movl %esi, %eax
+; X86_WIDEN-NEXT: mull %ebx
+; X86_WIDEN-NEXT: movl %esi, %ebp
+; X86_WIDEN-NEXT: subl %edx, %ebp
+; X86_WIDEN-NEXT: shrl %ebp
+; X86_WIDEN-NEXT: addl %edx, %ebp
+; X86_WIDEN-NEXT: shrl $2, %ebp
+; X86_WIDEN-NEXT: leal (,%ebp,8), %eax
+; X86_WIDEN-NEXT: subl %eax, %ebp
+; X86_WIDEN-NEXT: addl %esi, %ebp
+; X86_WIDEN-NEXT: movl %ecx, %eax
+; X86_WIDEN-NEXT: mull %ebx
+; X86_WIDEN-NEXT: movl %ecx, %eax
+; X86_WIDEN-NEXT: subl %edx, %eax
+; X86_WIDEN-NEXT: shrl %eax
+; X86_WIDEN-NEXT: addl %edx, %eax
+; X86_WIDEN-NEXT: shrl $2, %eax
+; X86_WIDEN-NEXT: leal (,%eax,8), %edx
+; X86_WIDEN-NEXT: subl %edx, %eax
+; X86_WIDEN-NEXT: addl %ecx, %eax
+; X86_WIDEN-NEXT: movl %eax, 4(%edi)
+; X86_WIDEN-NEXT: movl %ebp, (%edi)
+; X86_WIDEN-NEXT: popl %esi
+; X86_WIDEN-NEXT: popl %edi
+; X86_WIDEN-NEXT: popl %ebx
+; X86_WIDEN-NEXT: popl %ebp
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = urem <2 x i32> %a, <i32 7, i32 7>
store <2 x i32> %b, <2 x i32>* %y
@@ -202,6 +328,68 @@ define void @test_sdiv7_v2i32(<2 x i32>*
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_sdiv7_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: cltq
+; X64_WIDEN-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
+; X64_WIDEN-NEXT: shrq $32, %rcx
+; X64_WIDEN-NEXT: addl %ecx, %eax
+; X64_WIDEN-NEXT: movl %eax, %ecx
+; X64_WIDEN-NEXT: shrl $31, %ecx
+; X64_WIDEN-NEXT: sarl $2, %eax
+; X64_WIDEN-NEXT: addl %ecx, %eax
+; X64_WIDEN-NEXT: movd %eax, %xmm1
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: cltq
+; X64_WIDEN-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
+; X64_WIDEN-NEXT: shrq $32, %rcx
+; X64_WIDEN-NEXT: addl %ecx, %eax
+; X64_WIDEN-NEXT: movl %eax, %ecx
+; X64_WIDEN-NEXT: shrl $31, %ecx
+; X64_WIDEN-NEXT: sarl $2, %eax
+; X64_WIDEN-NEXT: addl %ecx, %eax
+; X64_WIDEN-NEXT: movd %eax, %xmm0
+; X64_WIDEN-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64_WIDEN-NEXT: movq %xmm1, (%rsi)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_sdiv7_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: pushl %ebp
+; X86_WIDEN-NEXT: pushl %ebx
+; X86_WIDEN-NEXT: pushl %edi
+; X86_WIDEN-NEXT: pushl %esi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86_WIDEN-NEXT: movl (%eax), %ecx
+; X86_WIDEN-NEXT: movl 4(%eax), %esi
+; X86_WIDEN-NEXT: movl $-1840700269, %ebp # imm = 0x92492493
+; X86_WIDEN-NEXT: movl %ecx, %eax
+; X86_WIDEN-NEXT: imull %ebp
+; X86_WIDEN-NEXT: movl %edx, %edi
+; X86_WIDEN-NEXT: addl %ecx, %edi
+; X86_WIDEN-NEXT: movl %edi, %eax
+; X86_WIDEN-NEXT: shrl $31, %eax
+; X86_WIDEN-NEXT: sarl $2, %edi
+; X86_WIDEN-NEXT: addl %eax, %edi
+; X86_WIDEN-NEXT: movl %esi, %eax
+; X86_WIDEN-NEXT: imull %ebp
+; X86_WIDEN-NEXT: addl %esi, %edx
+; X86_WIDEN-NEXT: movl %edx, %eax
+; X86_WIDEN-NEXT: shrl $31, %eax
+; X86_WIDEN-NEXT: sarl $2, %edx
+; X86_WIDEN-NEXT: addl %eax, %edx
+; X86_WIDEN-NEXT: movl %edx, 4(%ebx)
+; X86_WIDEN-NEXT: movl %edi, (%ebx)
+; X86_WIDEN-NEXT: popl %esi
+; X86_WIDEN-NEXT: popl %edi
+; X86_WIDEN-NEXT: popl %ebx
+; X86_WIDEN-NEXT: popl %ebp
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = sdiv <2 x i32> %a, <i32 7, i32 7>
store <2 x i32> %b, <2 x i32>* %y
@@ -277,6 +465,80 @@ define void @test_srem7_v2i32(<2 x i32>*
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_srem7_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: cltq
+; X64_WIDEN-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
+; X64_WIDEN-NEXT: shrq $32, %rcx
+; X64_WIDEN-NEXT: addl %eax, %ecx
+; X64_WIDEN-NEXT: movl %ecx, %edx
+; X64_WIDEN-NEXT: shrl $31, %edx
+; X64_WIDEN-NEXT: sarl $2, %ecx
+; X64_WIDEN-NEXT: addl %edx, %ecx
+; X64_WIDEN-NEXT: leal (,%rcx,8), %edx
+; X64_WIDEN-NEXT: subl %edx, %ecx
+; X64_WIDEN-NEXT: addl %eax, %ecx
+; X64_WIDEN-NEXT: movd %ecx, %xmm1
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: cltq
+; X64_WIDEN-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
+; X64_WIDEN-NEXT: shrq $32, %rcx
+; X64_WIDEN-NEXT: addl %eax, %ecx
+; X64_WIDEN-NEXT: movl %ecx, %edx
+; X64_WIDEN-NEXT: shrl $31, %edx
+; X64_WIDEN-NEXT: sarl $2, %ecx
+; X64_WIDEN-NEXT: addl %edx, %ecx
+; X64_WIDEN-NEXT: leal (,%rcx,8), %edx
+; X64_WIDEN-NEXT: subl %edx, %ecx
+; X64_WIDEN-NEXT: addl %eax, %ecx
+; X64_WIDEN-NEXT: movd %ecx, %xmm0
+; X64_WIDEN-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64_WIDEN-NEXT: movq %xmm1, (%rsi)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_srem7_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: pushl %ebp
+; X86_WIDEN-NEXT: pushl %ebx
+; X86_WIDEN-NEXT: pushl %edi
+; X86_WIDEN-NEXT: pushl %esi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86_WIDEN-NEXT: movl (%eax), %edi
+; X86_WIDEN-NEXT: movl 4(%eax), %ecx
+; X86_WIDEN-NEXT: movl $-1840700269, %ebp # imm = 0x92492493
+; X86_WIDEN-NEXT: movl %edi, %eax
+; X86_WIDEN-NEXT: imull %ebp
+; X86_WIDEN-NEXT: movl %edx, %esi
+; X86_WIDEN-NEXT: addl %edi, %esi
+; X86_WIDEN-NEXT: movl %esi, %eax
+; X86_WIDEN-NEXT: shrl $31, %eax
+; X86_WIDEN-NEXT: sarl $2, %esi
+; X86_WIDEN-NEXT: addl %eax, %esi
+; X86_WIDEN-NEXT: leal (,%esi,8), %eax
+; X86_WIDEN-NEXT: subl %eax, %esi
+; X86_WIDEN-NEXT: addl %edi, %esi
+; X86_WIDEN-NEXT: movl %ecx, %eax
+; X86_WIDEN-NEXT: imull %ebp
+; X86_WIDEN-NEXT: addl %ecx, %edx
+; X86_WIDEN-NEXT: movl %edx, %eax
+; X86_WIDEN-NEXT: shrl $31, %eax
+; X86_WIDEN-NEXT: sarl $2, %edx
+; X86_WIDEN-NEXT: addl %eax, %edx
+; X86_WIDEN-NEXT: leal (,%edx,8), %eax
+; X86_WIDEN-NEXT: subl %eax, %edx
+; X86_WIDEN-NEXT: addl %ecx, %edx
+; X86_WIDEN-NEXT: movl %edx, 4(%ebx)
+; X86_WIDEN-NEXT: movl %esi, (%ebx)
+; X86_WIDEN-NEXT: popl %esi
+; X86_WIDEN-NEXT: popl %edi
+; X86_WIDEN-NEXT: popl %ebx
+; X86_WIDEN-NEXT: popl %ebp
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = srem <2 x i32> %a, <i32 7, i32 7>
store <2 x i32> %b, <2 x i32>* %y
@@ -305,6 +567,24 @@ define void @test_udiv_pow2_v2i32(<2 x i
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_udiv_pow2_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: psrld $3, %xmm0
+; X64_WIDEN-NEXT: movq %xmm0, (%rsi)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_udiv_pow2_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86_WIDEN-NEXT: psrld $3, %xmm0
+; X86_WIDEN-NEXT: movd %xmm0, (%eax)
+; X86_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86_WIDEN-NEXT: movd %xmm0, 4(%eax)
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = udiv <2 x i32> %a, <i32 8, i32 8>
store <2 x i32> %b, <2 x i32>* %y
@@ -335,6 +615,24 @@ define void @test_urem_pow2_v2i32(<2 x i
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_urem_pow2_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: andps {{.*}}(%rip), %xmm0
+; X64_WIDEN-NEXT: movlps %xmm0, (%rsi)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_urem_pow2_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86_WIDEN-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86_WIDEN-NEXT: movd %xmm0, (%eax)
+; X86_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86_WIDEN-NEXT: movd %xmm0, 4(%eax)
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = urem <2 x i32> %a, <i32 8, i32 8>
store <2 x i32> %b, <2 x i32>* %y
@@ -403,6 +701,32 @@ define void @test_sdiv_pow2_v2i32(<2 x i
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_sdiv_pow2_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: movdqa %xmm0, %xmm1
+; X64_WIDEN-NEXT: psrad $31, %xmm1
+; X64_WIDEN-NEXT: psrld $29, %xmm1
+; X64_WIDEN-NEXT: paddd %xmm0, %xmm1
+; X64_WIDEN-NEXT: psrad $3, %xmm1
+; X64_WIDEN-NEXT: movq %xmm1, (%rsi)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_sdiv_pow2_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86_WIDEN-NEXT: movdqa %xmm0, %xmm1
+; X86_WIDEN-NEXT: psrad $31, %xmm1
+; X86_WIDEN-NEXT: psrld $29, %xmm1
+; X86_WIDEN-NEXT: paddd %xmm0, %xmm1
+; X86_WIDEN-NEXT: psrad $3, %xmm1
+; X86_WIDEN-NEXT: movd %xmm1, (%eax)
+; X86_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X86_WIDEN-NEXT: movd %xmm0, 4(%eax)
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = sdiv <2 x i32> %a, <i32 8, i32 8>
store <2 x i32> %b, <2 x i32>* %y
@@ -431,6 +755,24 @@ define void @test_srem_pow2_v2i32(<2 x i
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: movq %xmm0, (%eax)
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_srem_pow2_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: psrld $3, %xmm0
+; X64_WIDEN-NEXT: movq %xmm0, (%rsi)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_srem_pow2_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86_WIDEN-NEXT: psrld $3, %xmm0
+; X86_WIDEN-NEXT: movd %xmm0, (%eax)
+; X86_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86_WIDEN-NEXT: movd %xmm0, 4(%eax)
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = udiv <2 x i32> %a, <i32 8, i32 8>
store <2 x i32> %b, <2 x i32>* %y
@@ -503,6 +845,50 @@ define void @test_udiv_v2i32(<2 x i32>*
; X86-NEXT: addl $56, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_udiv_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq %rdx, %rcx
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: movd %xmm1, %esi
+; X64_WIDEN-NEXT: xorl %edx, %edx
+; X64_WIDEN-NEXT: divl %esi
+; X64_WIDEN-NEXT: movd %eax, %xmm2
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %esi
+; X64_WIDEN-NEXT: xorl %edx, %edx
+; X64_WIDEN-NEXT: divl %esi
+; X64_WIDEN-NEXT: movd %eax, %xmm0
+; X64_WIDEN-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64_WIDEN-NEXT: movq %xmm2, (%rcx)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_udiv_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: pushl %ebx
+; X86_WIDEN-NEXT: pushl %edi
+; X86_WIDEN-NEXT: pushl %esi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86_WIDEN-NEXT: movl (%ecx), %eax
+; X86_WIDEN-NEXT: movl 4(%ecx), %ecx
+; X86_WIDEN-NEXT: xorl %edx, %edx
+; X86_WIDEN-NEXT: divl (%ebx)
+; X86_WIDEN-NEXT: movl %eax, %esi
+; X86_WIDEN-NEXT: xorl %edx, %edx
+; X86_WIDEN-NEXT: movl %ecx, %eax
+; X86_WIDEN-NEXT: divl 4(%ebx)
+; X86_WIDEN-NEXT: movl %eax, 4(%edi)
+; X86_WIDEN-NEXT: movl %esi, (%edi)
+; X86_WIDEN-NEXT: popl %esi
+; X86_WIDEN-NEXT: popl %edi
+; X86_WIDEN-NEXT: popl %ebx
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = load <2 x i32>, <2 x i32>* %y
%c = udiv <2 x i32> %a, %b
@@ -576,6 +962,50 @@ define void @test_urem_v2i32(<2 x i32>*
; X86-NEXT: addl $56, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_urem_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq %rdx, %rcx
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: movd %xmm1, %esi
+; X64_WIDEN-NEXT: xorl %edx, %edx
+; X64_WIDEN-NEXT: divl %esi
+; X64_WIDEN-NEXT: movd %edx, %xmm2
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %esi
+; X64_WIDEN-NEXT: xorl %edx, %edx
+; X64_WIDEN-NEXT: divl %esi
+; X64_WIDEN-NEXT: movd %edx, %xmm0
+; X64_WIDEN-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64_WIDEN-NEXT: movq %xmm2, (%rcx)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_urem_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: pushl %ebx
+; X86_WIDEN-NEXT: pushl %edi
+; X86_WIDEN-NEXT: pushl %esi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86_WIDEN-NEXT: movl (%ecx), %eax
+; X86_WIDEN-NEXT: movl 4(%ecx), %ecx
+; X86_WIDEN-NEXT: xorl %edx, %edx
+; X86_WIDEN-NEXT: divl (%ebx)
+; X86_WIDEN-NEXT: movl %edx, %esi
+; X86_WIDEN-NEXT: xorl %edx, %edx
+; X86_WIDEN-NEXT: movl %ecx, %eax
+; X86_WIDEN-NEXT: divl 4(%ebx)
+; X86_WIDEN-NEXT: movl %edx, 4(%edi)
+; X86_WIDEN-NEXT: movl %esi, (%edi)
+; X86_WIDEN-NEXT: popl %esi
+; X86_WIDEN-NEXT: popl %edi
+; X86_WIDEN-NEXT: popl %ebx
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = load <2 x i32>, <2 x i32>* %y
%c = urem <2 x i32> %a, %b
@@ -650,6 +1080,50 @@ define void @test_sdiv_v2i32(<2 x i32>*
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_sdiv_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq %rdx, %rcx
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: movd %xmm1, %esi
+; X64_WIDEN-NEXT: cltd
+; X64_WIDEN-NEXT: idivl %esi
+; X64_WIDEN-NEXT: movd %eax, %xmm2
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %esi
+; X64_WIDEN-NEXT: cltd
+; X64_WIDEN-NEXT: idivl %esi
+; X64_WIDEN-NEXT: movd %eax, %xmm0
+; X64_WIDEN-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64_WIDEN-NEXT: movq %xmm2, (%rcx)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_sdiv_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: pushl %ebx
+; X86_WIDEN-NEXT: pushl %edi
+; X86_WIDEN-NEXT: pushl %esi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86_WIDEN-NEXT: movl (%ecx), %eax
+; X86_WIDEN-NEXT: movl 4(%ecx), %ecx
+; X86_WIDEN-NEXT: cltd
+; X86_WIDEN-NEXT: idivl (%ebx)
+; X86_WIDEN-NEXT: movl %eax, %esi
+; X86_WIDEN-NEXT: movl %ecx, %eax
+; X86_WIDEN-NEXT: cltd
+; X86_WIDEN-NEXT: idivl 4(%ebx)
+; X86_WIDEN-NEXT: movl %eax, 4(%edi)
+; X86_WIDEN-NEXT: movl %esi, (%edi)
+; X86_WIDEN-NEXT: popl %esi
+; X86_WIDEN-NEXT: popl %edi
+; X86_WIDEN-NEXT: popl %ebx
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = load <2 x i32>, <2 x i32>* %y
%c = sdiv <2 x i32> %a, %b
@@ -724,6 +1198,50 @@ define void @test_srem_v2i32(<2 x i32>*
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
+;
+; X64_WIDEN-LABEL: test_srem_v2i32:
+; X64_WIDEN: # %bb.0:
+; X64_WIDEN-NEXT: movq %rdx, %rcx
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64_WIDEN-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: movd %xmm1, %esi
+; X64_WIDEN-NEXT: cltd
+; X64_WIDEN-NEXT: idivl %esi
+; X64_WIDEN-NEXT: movd %eax, %xmm2
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %eax
+; X64_WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; X64_WIDEN-NEXT: movd %xmm0, %esi
+; X64_WIDEN-NEXT: cltd
+; X64_WIDEN-NEXT: idivl %esi
+; X64_WIDEN-NEXT: movd %eax, %xmm0
+; X64_WIDEN-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64_WIDEN-NEXT: movq %xmm2, (%rcx)
+; X64_WIDEN-NEXT: retq
+;
+; X86_WIDEN-LABEL: test_srem_v2i32:
+; X86_WIDEN: # %bb.0:
+; X86_WIDEN-NEXT: pushl %ebx
+; X86_WIDEN-NEXT: pushl %edi
+; X86_WIDEN-NEXT: pushl %esi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86_WIDEN-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86_WIDEN-NEXT: movl (%ecx), %eax
+; X86_WIDEN-NEXT: movl 4(%ecx), %ecx
+; X86_WIDEN-NEXT: cltd
+; X86_WIDEN-NEXT: idivl (%ebx)
+; X86_WIDEN-NEXT: movl %eax, %esi
+; X86_WIDEN-NEXT: movl %ecx, %eax
+; X86_WIDEN-NEXT: cltd
+; X86_WIDEN-NEXT: idivl 4(%ebx)
+; X86_WIDEN-NEXT: movl %eax, 4(%edi)
+; X86_WIDEN-NEXT: movl %esi, (%edi)
+; X86_WIDEN-NEXT: popl %esi
+; X86_WIDEN-NEXT: popl %edi
+; X86_WIDEN-NEXT: popl %ebx
+; X86_WIDEN-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
%b = load <2 x i32>, <2 x i32>* %y
%c = sdiv <2 x i32> %a, %b