[llvm] r339120 - [X86][SSE] Add more non-uniform exact sdiv vector tests covering all/none ashr paths
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 7 02:31:22 PDT 2018
Author: rksimon
Date: Tue Aug 7 02:31:22 2018
New Revision: 339120
URL: http://llvm.org/viewvc/llvm-project?rev=339120&view=rev
Log:
[X86][SSE] Add more non-uniform exact sdiv vector tests covering all/none ashr paths
Modified:
llvm/trunk/test/CodeGen/X86/sdiv-exact.ll
Modified: llvm/trunk/test/CodeGen/X86/sdiv-exact.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sdiv-exact.ll?rev=339120&r1=339119&r2=339120&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sdiv-exact.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sdiv-exact.ll Tue Aug 7 02:31:22 2018
@@ -123,3 +123,97 @@ define <4 x i32> @test5(<4 x i32> %x) {
%div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
ret <4 x i32> %div
}
+
+define <4 x i32> @test6(<4 x i32> %x) {
+; X86-LABEL: test6:
+; X86: # %bb.0:
+; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; X86-NEXT: movd %xmm1, %eax
+; X86-NEXT: sarl %eax
+; X86-NEXT: imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
+; X86-NEXT: movd %eax, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; X86-NEXT: movd %xmm2, %eax
+; X86-NEXT: sarl %eax
+; X86-NEXT: imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
+; X86-NEXT: movd %eax, %xmm2
+; X86-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: sarl $3, %eax
+; X86-NEXT: imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
+; X86-NEXT: movd %eax, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: sarl $3, %eax
+; X86-NEXT: imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
+; X86-NEXT: movd %eax, %xmm0
+; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: test6:
+; X64: # %bb.0:
+; X64-NEXT: vpextrd $1, %xmm0, %eax
+; X64-NEXT: sarl $3, %eax
+; X64-NEXT: imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
+; X64-NEXT: vmovd %xmm0, %ecx
+; X64-NEXT: sarl $3, %ecx
+; X64-NEXT: imull $-1431655765, %ecx, %ecx # imm = 0xAAAAAAAB
+; X64-NEXT: vmovd %ecx, %xmm1
+; X64-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X64-NEXT: vpextrd $2, %xmm0, %eax
+; X64-NEXT: sarl %eax
+; X64-NEXT: imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
+; X64-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; X64-NEXT: vpextrd $3, %xmm0, %eax
+; X64-NEXT: sarl %eax
+; X64-NEXT: imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
+; X64-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT: retq
+ %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26>
+ ret <4 x i32> %div
+}
+
+define <4 x i32> @test7(<4 x i32> %x) {
+; X86-LABEL: test7:
+; X86: # %bb.0:
+; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; X86-NEXT: movd %xmm1, %eax
+; X86-NEXT: imull $1749801491, %eax, %eax # imm = 0x684BDA13
+; X86-NEXT: movd %eax, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; X86-NEXT: movd %xmm2, %eax
+; X86-NEXT: imull $1749801491, %eax, %eax # imm = 0x684BDA13
+; X86-NEXT: movd %eax, %xmm2
+; X86-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
+; X86-NEXT: movd %eax, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
+; X86-NEXT: movd %eax, %xmm0
+; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: test7:
+; X64: # %bb.0:
+; X64-NEXT: vpextrd $1, %xmm0, %eax
+; X64-NEXT: imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
+; X64-NEXT: vmovd %xmm0, %ecx
+; X64-NEXT: imull $-1030792151, %ecx, %ecx # imm = 0xC28F5C29
+; X64-NEXT: vmovd %ecx, %xmm1
+; X64-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; X64-NEXT: vpextrd $2, %xmm0, %eax
+; X64-NEXT: imull $1749801491, %eax, %eax # imm = 0x684BDA13
+; X64-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; X64-NEXT: vpextrd $3, %xmm0, %eax
+; X64-NEXT: imull $1749801491, %eax, %eax # imm = 0x684BDA13
+; X64-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT: retq
+ %div = sdiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27>
+ ret <4 x i32> %div
+}
More information about the llvm-commits mailing list