[llvm] r286577 - [X86] Add knownbits vector UREM/SREM tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 11 03:11:40 PST 2016
Author: rksimon
Date: Fri Nov 11 05:11:40 2016
New Revision: 286577
URL: http://llvm.org/viewvc/llvm-project?rev=286577&view=rev
Log:
[X86] Add knownbits vector UREM/SREM tests
In preparation for demandedelts support
Modified:
llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
Modified: llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-bits-vector.ll?rev=286577&r1=286576&r2=286577&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-bits-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/known-bits-vector.ll Fri Nov 11 05:11:40 2016
@@ -255,3 +255,119 @@ define <4 x i32> @knownbits_mask_udiv_sh
%4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
ret <4 x i32> %4
}
+
+define <4 x i32> @knownbits_urem_lshr(<4 x i32> %a0) nounwind {
+; X32-LABEL: knownbits_urem_lshr:
+; X32: # BB#0:
+; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_urem_lshr:
+; X64: # BB#0:
+; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = urem <4 x i32> %a0, <i32 16, i32 16, i32 16, i32 16>
+ %2 = lshr <4 x i32> %1, <i32 22, i32 22, i32 22, i32 22>
+ ret <4 x i32> %2
+}
+
+define <4 x i32> @knownbits_mask_urem_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; X32-LABEL: knownbits_mask_urem_shuffle_lshr:
+; X32: # BB#0:
+; X32-NEXT: pushl %esi
+; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
+; X32-NEXT: vpand %xmm2, %xmm0, %xmm0
+; X32-NEXT: vpand %xmm2, %xmm1, %xmm1
+; X32-NEXT: vpextrd $1, %xmm0, %eax
+; X32-NEXT: vpextrd $1, %xmm1, %ecx
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: divl %ecx
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: vmovd %xmm0, %eax
+; X32-NEXT: vmovd %xmm1, %esi
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: divl %esi
+; X32-NEXT: vmovd %edx, %xmm2
+; X32-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; X32-NEXT: vpextrd $2, %xmm0, %eax
+; X32-NEXT: vpextrd $2, %xmm1, %ecx
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: divl %ecx
+; X32-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
+; X32-NEXT: vpextrd $3, %xmm0, %eax
+; X32-NEXT: vpextrd $3, %xmm1, %ecx
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: divl %ecx
+; X32-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X32-NEXT: vpsrld $22, %xmm0, %xmm0
+; X32-NEXT: popl %esi
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_mask_urem_shuffle_lshr:
+; X64: # BB#0:
+; X64-NEXT: vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
+; X64-NEXT: vpand %xmm2, %xmm0, %xmm0
+; X64-NEXT: vpand %xmm2, %xmm1, %xmm1
+; X64-NEXT: vpextrd $1, %xmm0, %eax
+; X64-NEXT: vpextrd $1, %xmm1, %ecx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %ecx
+; X64-NEXT: movl %edx, %ecx
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: vmovd %xmm1, %esi
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %esi
+; X64-NEXT: vmovd %edx, %xmm2
+; X64-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
+; X64-NEXT: vpextrd $2, %xmm0, %eax
+; X64-NEXT: vpextrd $2, %xmm1, %ecx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %ecx
+; X64-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
+; X64-NEXT: vpextrd $3, %xmm0, %eax
+; X64-NEXT: vpextrd $3, %xmm1, %ecx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %ecx
+; X64-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X64-NEXT: vpsrld $22, %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
+ %2 = and <4 x i32> %a1, <i32 32767, i32 -1, i32 -1, i32 32767>
+ %3 = urem <4 x i32> %1, %2
+ %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
+ %5 = lshr <4 x i32> %4, <i32 22, i32 22, i32 22, i32 22>
+ ret <4 x i32> %5
+}
+
+define <4 x i32> @knownbits_mask_srem_shuffle_lshr(<4 x i32> %a0) nounwind {
+; X32-LABEL: knownbits_mask_srem_shuffle_lshr:
+; X32: # BB#0:
+; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpsrad $31, %xmm0, %xmm1
+; X32-NEXT: vpsrld $28, %xmm1, %xmm1
+; X32-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; X32-NEXT: vpand {{\.LCPI.*}}, %xmm1, %xmm1
+; X32-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X32-NEXT: vpsrld $22, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: knownbits_mask_srem_shuffle_lshr:
+; X64: # BB#0:
+; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpsrad $31, %xmm0, %xmm1
+; X64-NEXT: vpsrld $28, %xmm1, %xmm1
+; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm1
+; X64-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X64-NEXT: vpsrld $22, %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = and <4 x i32> %a0, <i32 -32768, i32 -1, i32 -1, i32 -32768>
+ %2 = srem <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
+ %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
+ %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
+ ret <4 x i32> %4
+}
More information about the llvm-commits mailing list