[llvm] adcc557 - [X86] rem-vector-lkk.ll - improve CPU coverage to cover all x86-64 levels (#169805)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 27 06:31:37 PST 2025
Author: Simon Pilgrim
Date: 2025-11-27T14:31:32Z
New Revision: adcc557ef176ee36a1e5df12b60789b6fa2fe73c
URL: https://github.com/llvm/llvm-project/commit/adcc557ef176ee36a1e5df12b60789b6fa2fe73c
DIFF: https://github.com/llvm/llvm-project/commit/adcc557ef176ee36a1e5df12b60789b6fa2fe73c.diff
LOG: [X86] rem-vector-lkk.ll - improve CPU coverage to cover all x86-64 levels (#169805)
SSE2/SSE4.1/AVX1/AVX2 + x86-64-v4 (AVX512)
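For reference, the scalar sequences checked below are the usual fixed-point
"magic number" lowering of rem-by-constant. A minimal C sketch of the
divisor-95 lane of fold_srem_vec_1, with the constants read straight off the
CHECK lines (the helper name is illustrative, not part of the patch):

    #include <stdint.h>

    /* Illustrative only: multiply by the 16-bit magic -21385 (0xAC77),
       keep the high half of the product, add the dividend back, then
       shift and add the sign bit to round the quotient toward zero. */
    static int16_t srem95(int16_t x) {
        int32_t prod = (int32_t)x * -21385;                    /* imull $-21385 */
        int16_t t = (int16_t)((prod >> 16) + x);               /* shrl $16; addl */
        int16_t q = (int16_t)((t >> 6) + ((uint16_t)t >> 15)); /* sarl $6; shrl $15; addl */
        return (int16_t)(x - q * 95);                          /* imull $95; subl */
    }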
Added:
Modified:
llvm/test/CodeGen/X86/srem-vector-lkk.ll
llvm/test/CodeGen/X86/urem-vector-lkk.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/srem-vector-lkk.ll b/llvm/test/CodeGen/X86/srem-vector-lkk.ll
index e936e1ef81b74..0fb6eb3c58893 100644
--- a/llvm/test/CodeGen/X86/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/X86/srem-vector-lkk.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) {
; SSE-LABEL: fold_srem_vec_1:
@@ -55,55 +57,105 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) {
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: fold_srem_vec_1:
-; AVX: # %bb.0:
-; AVX-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-NEXT: movswl %ax, %ecx
-; AVX-NEXT: imull $32081, %ecx, %ecx # imm = 0x7D51
-; AVX-NEXT: shrl $16, %ecx
-; AVX-NEXT: subl %eax, %ecx
-; AVX-NEXT: movzwl %cx, %ecx
-; AVX-NEXT: movswl %cx, %edx
-; AVX-NEXT: shrl $15, %ecx
-; AVX-NEXT: sarl $9, %edx
-; AVX-NEXT: addl %ecx, %edx
-; AVX-NEXT: imull $-1003, %edx, %ecx # imm = 0xFC15
-; AVX-NEXT: subl %ecx, %eax
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: movswl %cx, %edx
-; AVX-NEXT: imull $-21385, %edx, %edx # imm = 0xAC77
-; AVX-NEXT: shrl $16, %edx
-; AVX-NEXT: addl %ecx, %edx
-; AVX-NEXT: movzwl %dx, %edx
-; AVX-NEXT: movswl %dx, %esi
-; AVX-NEXT: shrl $15, %edx
-; AVX-NEXT: sarl $6, %esi
-; AVX-NEXT: addl %edx, %esi
-; AVX-NEXT: imull $95, %esi, %edx
-; AVX-NEXT: subl %edx, %ecx
-; AVX-NEXT: vmovd %ecx, %xmm1
-; AVX-NEXT: vpextrw $1, %xmm0, %ecx
-; AVX-NEXT: movswl %cx, %edx
-; AVX-NEXT: imull $-16913, %edx, %edx # imm = 0xBDEF
-; AVX-NEXT: movl %edx, %esi
-; AVX-NEXT: shrl $31, %esi
-; AVX-NEXT: sarl $21, %edx
-; AVX-NEXT: addl %esi, %edx
-; AVX-NEXT: imull $-124, %edx, %edx
-; AVX-NEXT: subl %edx, %ecx
-; AVX-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
-; AVX-NEXT: vpextrw $2, %xmm0, %ecx
-; AVX-NEXT: movswl %cx, %edx
-; AVX-NEXT: imull $2675, %edx, %edx # imm = 0xA73
-; AVX-NEXT: movl %edx, %esi
-; AVX-NEXT: shrl $31, %esi
-; AVX-NEXT: sarl $18, %edx
-; AVX-NEXT: addl %esi, %edx
-; AVX-NEXT: imull $98, %edx, %edx
-; AVX-NEXT: subl %edx, %ecx
-; AVX-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm0
-; AVX-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: fold_srem_vec_1:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vpextrw $3, %xmm0, %eax
+; AVX1OR2-NEXT: movswl %ax, %ecx
+; AVX1OR2-NEXT: imull $32081, %ecx, %ecx # imm = 0x7D51
+; AVX1OR2-NEXT: shrl $16, %ecx
+; AVX1OR2-NEXT: subl %eax, %ecx
+; AVX1OR2-NEXT: movzwl %cx, %ecx
+; AVX1OR2-NEXT: movswl %cx, %edx
+; AVX1OR2-NEXT: shrl $15, %ecx
+; AVX1OR2-NEXT: sarl $9, %edx
+; AVX1OR2-NEXT: addl %ecx, %edx
+; AVX1OR2-NEXT: imull $-1003, %edx, %ecx # imm = 0xFC15
+; AVX1OR2-NEXT: subl %ecx, %eax
+; AVX1OR2-NEXT: vmovd %xmm0, %ecx
+; AVX1OR2-NEXT: movswl %cx, %edx
+; AVX1OR2-NEXT: imull $-21385, %edx, %edx # imm = 0xAC77
+; AVX1OR2-NEXT: shrl $16, %edx
+; AVX1OR2-NEXT: addl %ecx, %edx
+; AVX1OR2-NEXT: movzwl %dx, %edx
+; AVX1OR2-NEXT: movswl %dx, %esi
+; AVX1OR2-NEXT: shrl $15, %edx
+; AVX1OR2-NEXT: sarl $6, %esi
+; AVX1OR2-NEXT: addl %edx, %esi
+; AVX1OR2-NEXT: imull $95, %esi, %edx
+; AVX1OR2-NEXT: subl %edx, %ecx
+; AVX1OR2-NEXT: vmovd %ecx, %xmm1
+; AVX1OR2-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX1OR2-NEXT: movswl %cx, %edx
+; AVX1OR2-NEXT: imull $-16913, %edx, %edx # imm = 0xBDEF
+; AVX1OR2-NEXT: movl %edx, %esi
+; AVX1OR2-NEXT: shrl $31, %esi
+; AVX1OR2-NEXT: sarl $21, %edx
+; AVX1OR2-NEXT: addl %esi, %edx
+; AVX1OR2-NEXT: imull $-124, %edx, %edx
+; AVX1OR2-NEXT: subl %edx, %ecx
+; AVX1OR2-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
+; AVX1OR2-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX1OR2-NEXT: movswl %cx, %edx
+; AVX1OR2-NEXT: imull $2675, %edx, %edx # imm = 0xA73
+; AVX1OR2-NEXT: movl %edx, %esi
+; AVX1OR2-NEXT: shrl $31, %esi
+; AVX1OR2-NEXT: sarl $18, %edx
+; AVX1OR2-NEXT: addl %esi, %edx
+; AVX1OR2-NEXT: imull $98, %edx, %edx
+; AVX1OR2-NEXT: subl %edx, %ecx
+; AVX1OR2-NEXT: vpinsrw $2, %ecx, %xmm1, %xmm0
+; AVX1OR2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: fold_srem_vec_1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpextrw $3, %xmm0, %eax
+; AVX512-NEXT: movswl %ax, %ecx
+; AVX512-NEXT: imull $32081, %ecx, %ecx # imm = 0x7D51
+; AVX512-NEXT: shrl $16, %ecx
+; AVX512-NEXT: subl %eax, %ecx
+; AVX512-NEXT: movzwl %cx, %edx
+; AVX512-NEXT: movswl %dx, %ecx
+; AVX512-NEXT: shrl $15, %edx
+; AVX512-NEXT: sarl $9, %ecx
+; AVX512-NEXT: addl %edx, %ecx
+; AVX512-NEXT: vmovd %xmm0, %edx
+; AVX512-NEXT: movswl %dx, %esi
+; AVX512-NEXT: imull $-21385, %esi, %esi # imm = 0xAC77
+; AVX512-NEXT: shrl $16, %esi
+; AVX512-NEXT: addl %edx, %esi
+; AVX512-NEXT: movzwl %si, %esi
+; AVX512-NEXT: movswl %si, %edi
+; AVX512-NEXT: shrl $15, %esi
+; AVX512-NEXT: sarl $6, %edi
+; AVX512-NEXT: addl %esi, %edi
+; AVX512-NEXT: imull $95, %edi, %esi
+; AVX512-NEXT: subl %esi, %edx
+; AVX512-NEXT: vmovd %edx, %xmm1
+; AVX512-NEXT: vpextrw $1, %xmm0, %edx
+; AVX512-NEXT: movswl %dx, %esi
+; AVX512-NEXT: imull $-16913, %esi, %esi # imm = 0xBDEF
+; AVX512-NEXT: movl %esi, %edi
+; AVX512-NEXT: shrl $31, %edi
+; AVX512-NEXT: sarl $21, %esi
+; AVX512-NEXT: addl %edi, %esi
+; AVX512-NEXT: imull $-1003, %ecx, %ecx # imm = 0xFC15
+; AVX512-NEXT: imull $-124, %esi, %esi
+; AVX512-NEXT: subl %esi, %edx
+; AVX512-NEXT: vpinsrw $1, %edx, %xmm1, %xmm1
+; AVX512-NEXT: vpextrw $2, %xmm0, %edx
+; AVX512-NEXT: subl %ecx, %eax
+; AVX512-NEXT: movswl %dx, %ecx
+; AVX512-NEXT: imull $2675, %ecx, %ecx # imm = 0xA73
+; AVX512-NEXT: movl %ecx, %esi
+; AVX512-NEXT: shrl $31, %esi
+; AVX512-NEXT: sarl $18, %ecx
+; AVX512-NEXT: addl %esi, %ecx
+; AVX512-NEXT: imull $98, %ecx, %ecx
+; AVX512-NEXT: subl %ecx, %edx
+; AVX512-NEXT: vpinsrw $2, %edx, %xmm1, %xmm0
+; AVX512-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX512-NEXT: retq
%1 = srem <4 x i16> %x, <i16 95, i16 -124, i16 98, i16 -1003>
ret <4 x i16> %1
}
@@ -139,20 +191,35 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) {
; Don't fold if we can combine srem with sdiv.
define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) {
-; SSE-LABEL: combine_srem_sdiv:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151]
-; SSE-NEXT: pmulhw %xmm0, %xmm1
-; SSE-NEXT: paddw %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrlw $15, %xmm2
-; SSE-NEXT: psraw $6, %xmm1
-; SSE-NEXT: paddw %xmm2, %xmm1
-; SSE-NEXT: pmovsxbw {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95]
-; SSE-NEXT: pmullw %xmm1, %xmm2
-; SSE-NEXT: psubw %xmm2, %xmm0
-; SSE-NEXT: paddw %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_srem_sdiv:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151]
+; SSE2-NEXT: pmulhw %xmm0, %xmm1
+; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrlw $15, %xmm2
+; SSE2-NEXT: psraw $6, %xmm1
+; SSE2-NEXT: paddw %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95]
+; SSE2-NEXT: pmullw %xmm1, %xmm2
+; SSE2-NEXT: psubw %xmm2, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: combine_srem_sdiv:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151]
+; SSE4-NEXT: pmulhw %xmm0, %xmm1
+; SSE4-NEXT: paddw %xmm0, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm2
+; SSE4-NEXT: psrlw $15, %xmm2
+; SSE4-NEXT: psraw $6, %xmm1
+; SSE4-NEXT: paddw %xmm2, %xmm1
+; SSE4-NEXT: pmovsxbw {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95]
+; SSE4-NEXT: pmullw %xmm1, %xmm2
+; SSE4-NEXT: psubw %xmm2, %xmm0
+; SSE4-NEXT: paddw %xmm1, %xmm0
+; SSE4-NEXT: retq
;
; AVX-LABEL: combine_srem_sdiv:
; AVX: # %bb.0:
@@ -421,48 +488,93 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) {
; Don't fold i64 srem.
define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) {
-; SSE-LABEL: dont_fold_srem_i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: movq %xmm1, %rcx
-; SSE-NEXT: movabsq $-5614226457215950491, %rdx # imm = 0xB21642C8590B2165
-; SSE-NEXT: movq %rcx, %rax
-; SSE-NEXT: imulq %rdx
-; SSE-NEXT: addq %rcx, %rdx
-; SSE-NEXT: movq %rdx, %rax
-; SSE-NEXT: shrq $63, %rax
-; SSE-NEXT: sarq $4, %rdx
-; SSE-NEXT: addq %rax, %rdx
-; SSE-NEXT: leaq (%rdx,%rdx,2), %rax
-; SSE-NEXT: shlq $3, %rax
-; SSE-NEXT: subq %rax, %rdx
-; SSE-NEXT: addq %rcx, %rdx
-; SSE-NEXT: movq %rdx, %xmm1
-; SSE-NEXT: pextrq $1, %xmm2, %rcx
-; SSE-NEXT: movabsq $6966426675817289639, %rdx # imm = 0x60ADB826E5E517A7
-; SSE-NEXT: movq %rcx, %rax
-; SSE-NEXT: imulq %rdx
-; SSE-NEXT: movq %rdx, %rax
-; SSE-NEXT: shrq $63, %rax
-; SSE-NEXT: sarq $11, %rdx
-; SSE-NEXT: addq %rax, %rdx
-; SSE-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F
-; SSE-NEXT: subq %rax, %rcx
-; SSE-NEXT: movq %rcx, %xmm2
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE-NEXT: pextrq $1, %xmm0, %rcx
-; SSE-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5
-; SSE-NEXT: movq %rcx, %rax
-; SSE-NEXT: imulq %rdx
-; SSE-NEXT: movq %rdx, %rax
-; SSE-NEXT: shrq $63, %rax
-; SSE-NEXT: sarq $8, %rdx
-; SSE-NEXT: addq %rax, %rdx
-; SSE-NEXT: imulq $654, %rdx, %rax # imm = 0x28E
-; SSE-NEXT: subq %rax, %rcx
-; SSE-NEXT: movq %rcx, %xmm0
-; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: dont_fold_srem_i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: movq %xmm1, %rcx
+; SSE2-NEXT: movabsq $-5614226457215950491, %rdx # imm = 0xB21642C8590B2165
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: imulq %rdx
+; SSE2-NEXT: addq %rcx, %rdx
+; SSE2-NEXT: movq %rdx, %rax
+; SSE2-NEXT: shrq $63, %rax
+; SSE2-NEXT: sarq $4, %rdx
+; SSE2-NEXT: addq %rax, %rdx
+; SSE2-NEXT: leaq (%rdx,%rdx,2), %rax
+; SSE2-NEXT: shlq $3, %rax
+; SSE2-NEXT: subq %rax, %rdx
+; SSE2-NEXT: addq %rcx, %rdx
+; SSE2-NEXT: movq %rdx, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: movabsq $6966426675817289639, %rdx # imm = 0x60ADB826E5E517A7
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: imulq %rdx
+; SSE2-NEXT: movq %rdx, %rax
+; SSE2-NEXT: shrq $63, %rax
+; SSE2-NEXT: sarq $11, %rdx
+; SSE2-NEXT: addq %rax, %rdx
+; SSE2-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F
+; SSE2-NEXT: subq %rax, %rcx
+; SSE2-NEXT: movq %rcx, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: imulq %rdx
+; SSE2-NEXT: movq %rdx, %rax
+; SSE2-NEXT: shrq $63, %rax
+; SSE2-NEXT: sarq $8, %rdx
+; SSE2-NEXT: addq %rax, %rdx
+; SSE2-NEXT: imulq $654, %rdx, %rax # imm = 0x28E
+; SSE2-NEXT: subq %rax, %rcx
+; SSE2-NEXT: movq %rcx, %xmm0
+; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: dont_fold_srem_i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm1, %xmm2
+; SSE4-NEXT: movq %xmm1, %rcx
+; SSE4-NEXT: movabsq $-5614226457215950491, %rdx # imm = 0xB21642C8590B2165
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: imulq %rdx
+; SSE4-NEXT: addq %rcx, %rdx
+; SSE4-NEXT: movq %rdx, %rax
+; SSE4-NEXT: shrq $63, %rax
+; SSE4-NEXT: sarq $4, %rdx
+; SSE4-NEXT: addq %rax, %rdx
+; SSE4-NEXT: leaq (%rdx,%rdx,2), %rax
+; SSE4-NEXT: shlq $3, %rax
+; SSE4-NEXT: subq %rax, %rdx
+; SSE4-NEXT: addq %rcx, %rdx
+; SSE4-NEXT: movq %rdx, %xmm1
+; SSE4-NEXT: pextrq $1, %xmm2, %rcx
+; SSE4-NEXT: movabsq $6966426675817289639, %rdx # imm = 0x60ADB826E5E517A7
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: imulq %rdx
+; SSE4-NEXT: movq %rdx, %rax
+; SSE4-NEXT: shrq $63, %rax
+; SSE4-NEXT: sarq $11, %rdx
+; SSE4-NEXT: addq %rax, %rdx
+; SSE4-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F
+; SSE4-NEXT: subq %rax, %rcx
+; SSE4-NEXT: movq %rcx, %xmm2
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE4-NEXT: pextrq $1, %xmm0, %rcx
+; SSE4-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: imulq %rdx
+; SSE4-NEXT: movq %rdx, %rax
+; SSE4-NEXT: shrq $63, %rax
+; SSE4-NEXT: sarq $8, %rdx
+; SSE4-NEXT: addq %rax, %rdx
+; SSE4-NEXT: imulq $654, %rdx, %rax # imm = 0x28E
+; SSE4-NEXT: subq %rax, %rcx
+; SSE4-NEXT: movq %rcx, %xmm0
+; SSE4-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE4-NEXT: retq
;
; AVX1-LABEL: dont_fold_srem_i64:
; AVX1: # %bb.0:
@@ -551,6 +663,50 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) {
; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: dont_fold_srem_i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vmovq %xmm1, %rcx
+; AVX512-NEXT: movabsq $-5614226457215950491, %rdx # imm = 0xB21642C8590B2165
+; AVX512-NEXT: movq %rcx, %rax
+; AVX512-NEXT: imulq %rdx
+; AVX512-NEXT: addq %rcx, %rdx
+; AVX512-NEXT: movq %rdx, %rax
+; AVX512-NEXT: shrq $63, %rax
+; AVX512-NEXT: sarq $4, %rdx
+; AVX512-NEXT: addq %rax, %rdx
+; AVX512-NEXT: leaq (%rdx,%rdx,2), %rax
+; AVX512-NEXT: shlq $3, %rax
+; AVX512-NEXT: subq %rax, %rdx
+; AVX512-NEXT: addq %rcx, %rdx
+; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX512-NEXT: vmovq %rdx, %xmm1
+; AVX512-NEXT: movabsq $6966426675817289639, %rdx # imm = 0x60ADB826E5E517A7
+; AVX512-NEXT: movq %rcx, %rax
+; AVX512-NEXT: imulq %rdx
+; AVX512-NEXT: movq %rdx, %rax
+; AVX512-NEXT: shrq $63, %rax
+; AVX512-NEXT: sarq $11, %rdx
+; AVX512-NEXT: addq %rax, %rdx
+; AVX512-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F
+; AVX512-NEXT: subq %rax, %rcx
+; AVX512-NEXT: vmovq %rcx, %xmm2
+; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm2[0]
+; AVX512-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5
+; AVX512-NEXT: movq %rcx, %rax
+; AVX512-NEXT: imulq %rdx
+; AVX512-NEXT: movq %rdx, %rax
+; AVX512-NEXT: shrq $63, %rax
+; AVX512-NEXT: sarq $8, %rdx
+; AVX512-NEXT: addq %rax, %rdx
+; AVX512-NEXT: imulq $654, %rdx, %rax # imm = 0x28E
+; AVX512-NEXT: subq %rax, %rcx
+; AVX512-NEXT: vmovq %rcx, %xmm1
+; AVX512-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
%1 = srem <4 x i64> %x, <i64 1, i64 654, i64 23, i64 5423>
ret <4 x i64> %1
}
diff --git a/llvm/test/CodeGen/X86/urem-vector-lkk.ll b/llvm/test/CodeGen/X86/urem-vector-lkk.ll
index 94c7892795c2b..3d0d73be9a589 100644
--- a/llvm/test/CodeGen/X86/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/X86/urem-vector-lkk.ll
@@ -1,7 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) {
; SSE-LABEL: fold_urem_vec_1:
@@ -110,16 +112,27 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) {
; Don't fold if we can combine urem with udiv.
define <4 x i16> @combine_urem_udiv(<4 x i16> %x) {
-; SSE-LABEL: combine_urem_udiv:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151]
-; SSE-NEXT: pmulhuw %xmm0, %xmm1
-; SSE-NEXT: psrlw $6, %xmm1
-; SSE-NEXT: pmovsxbw {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95]
-; SSE-NEXT: pmullw %xmm1, %xmm2
-; SSE-NEXT: psubw %xmm2, %xmm0
-; SSE-NEXT: paddw %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_urem_udiv:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151]
+; SSE2-NEXT: pmulhuw %xmm0, %xmm1
+; SSE2-NEXT: psrlw $6, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95]
+; SSE2-NEXT: pmullw %xmm1, %xmm2
+; SSE2-NEXT: psubw %xmm2, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: combine_urem_udiv:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa {{.*#+}} xmm1 = [44151,44151,44151,44151,44151,44151,44151,44151]
+; SSE4-NEXT: pmulhuw %xmm0, %xmm1
+; SSE4-NEXT: psrlw $6, %xmm1
+; SSE4-NEXT: pmovsxbw {{.*#+}} xmm2 = [95,95,95,95,95,95,95,95]
+; SSE4-NEXT: pmullw %xmm1, %xmm2
+; SSE4-NEXT: psubw %xmm2, %xmm0
+; SSE4-NEXT: paddw %xmm1, %xmm0
+; SSE4-NEXT: retq
;
; AVX-LABEL: combine_urem_udiv:
; AVX: # %bb.0:
@@ -137,24 +150,43 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) {
; Don't fold for divisors that are a power of two.
define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) {
-; SSE-LABEL: dont_fold_urem_power_of_two:
-; SSE: # %bb.0:
-; SSE-NEXT: pmovsxbd {{.*#+}} xmm1 = [63,63,63,63]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pextrw $1, %xmm0, %eax
-; SSE-NEXT: andl $31, %eax
-; SSE-NEXT: pinsrw $1, %eax, %xmm1
-; SSE-NEXT: pextrw $2, %xmm0, %eax
-; SSE-NEXT: andl $7, %eax
-; SSE-NEXT: pinsrw $2, %eax, %xmm1
-; SSE-NEXT: pextrw $3, %xmm0, %eax
-; SSE-NEXT: imull $44151, %eax, %ecx # imm = 0xAC77
-; SSE-NEXT: shrl $22, %ecx
-; SSE-NEXT: imull $95, %ecx, %ecx
-; SSE-NEXT: subl %ecx, %eax
-; SSE-NEXT: pinsrw $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: dont_fold_urem_power_of_two:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [63,63,63,63]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-NEXT: andl $31, %eax
+; SSE2-NEXT: pinsrw $1, %eax, %xmm1
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-NEXT: andl $7, %eax
+; SSE2-NEXT: pinsrw $2, %eax, %xmm1
+; SSE2-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-NEXT: imull $44151, %eax, %ecx # imm = 0xAC77
+; SSE2-NEXT: shrl $22, %ecx
+; SSE2-NEXT: imull $95, %ecx, %ecx
+; SSE2-NEXT: subl %ecx, %eax
+; SSE2-NEXT: pinsrw $3, %eax, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: dont_fold_urem_power_of_two:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxbd {{.*#+}} xmm1 = [63,63,63,63]
+; SSE4-NEXT: pand %xmm0, %xmm1
+; SSE4-NEXT: pextrw $1, %xmm0, %eax
+; SSE4-NEXT: andl $31, %eax
+; SSE4-NEXT: pinsrw $1, %eax, %xmm1
+; SSE4-NEXT: pextrw $2, %xmm0, %eax
+; SSE4-NEXT: andl $7, %eax
+; SSE4-NEXT: pinsrw $2, %eax, %xmm1
+; SSE4-NEXT: pextrw $3, %xmm0, %eax
+; SSE4-NEXT: imull $44151, %eax, %ecx # imm = 0xAC77
+; SSE4-NEXT: shrl $22, %ecx
+; SSE4-NEXT: imull $95, %ecx, %ecx
+; SSE4-NEXT: subl %ecx, %eax
+; SSE4-NEXT: pinsrw $3, %eax, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm0
+; SSE4-NEXT: retq
;
; AVX1-LABEL: dont_fold_urem_power_of_two:
; AVX1: # %bb.0:
@@ -190,6 +222,23 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) {
; AVX2-NEXT: subl %ecx, %eax
; AVX2-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: dont_fold_urem_power_of_two:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm1
+; AVX512-NEXT: vpextrw $1, %xmm0, %eax
+; AVX512-NEXT: andl $31, %eax
+; AVX512-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; AVX512-NEXT: vpextrw $2, %xmm0, %eax
+; AVX512-NEXT: andl $7, %eax
+; AVX512-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; AVX512-NEXT: vpextrw $3, %xmm0, %eax
+; AVX512-NEXT: imull $44151, %eax, %ecx # imm = 0xAC77
+; AVX512-NEXT: shrl $22, %ecx
+; AVX512-NEXT: imull $95, %ecx, %ecx
+; AVX512-NEXT: subl %ecx, %eax
+; AVX512-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0
+; AVX512-NEXT: retq
%1 = urem <4 x i16> %x, <i16 64, i16 32, i16 8, i16 95>
ret <4 x i16> %1
}
@@ -228,36 +277,67 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) {
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: dont_fold_urem_one:
-; AVX: # %bb.0:
-; AVX-NEXT: vpextrw $2, %xmm0, %eax
-; AVX-NEXT: imull $25645, %eax, %ecx # imm = 0x642D
-; AVX-NEXT: shrl $16, %ecx
-; AVX-NEXT: movl %eax, %edx
-; AVX-NEXT: subl %ecx, %edx
-; AVX-NEXT: movzwl %dx, %edx
-; AVX-NEXT: shrl %edx
-; AVX-NEXT: addl %ecx, %edx
-; AVX-NEXT: shrl $4, %edx
-; AVX-NEXT: leal (%rdx,%rdx,2), %ecx
-; AVX-NEXT: shll $3, %ecx
-; AVX-NEXT: subl %ecx, %edx
-; AVX-NEXT: addl %eax, %edx
-; AVX-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NEXT: imull $51307, %eax, %ecx # imm = 0xC86B
-; AVX-NEXT: shrl $25, %ecx
-; AVX-NEXT: imull $654, %ecx, %ecx # imm = 0x28E
-; AVX-NEXT: subl %ecx, %eax
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpinsrw $2, %edx, %xmm1, %xmm1
-; AVX-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-NEXT: imull $12375, %eax, %ecx # imm = 0x3057
-; AVX-NEXT: shrl $26, %ecx
-; AVX-NEXT: imull $5423, %ecx, %ecx # imm = 0x152F
-; AVX-NEXT: subl %ecx, %eax
-; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1OR2-LABEL: dont_fold_urem_one:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vpextrw $2, %xmm0, %eax
+; AVX1OR2-NEXT: imull $25645, %eax, %ecx # imm = 0x642D
+; AVX1OR2-NEXT: shrl $16, %ecx
+; AVX1OR2-NEXT: movl %eax, %edx
+; AVX1OR2-NEXT: subl %ecx, %edx
+; AVX1OR2-NEXT: movzwl %dx, %edx
+; AVX1OR2-NEXT: shrl %edx
+; AVX1OR2-NEXT: addl %ecx, %edx
+; AVX1OR2-NEXT: shrl $4, %edx
+; AVX1OR2-NEXT: leal (%rdx,%rdx,2), %ecx
+; AVX1OR2-NEXT: shll $3, %ecx
+; AVX1OR2-NEXT: subl %ecx, %edx
+; AVX1OR2-NEXT: addl %eax, %edx
+; AVX1OR2-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1OR2-NEXT: imull $51307, %eax, %ecx # imm = 0xC86B
+; AVX1OR2-NEXT: shrl $25, %ecx
+; AVX1OR2-NEXT: imull $654, %ecx, %ecx # imm = 0x28E
+; AVX1OR2-NEXT: subl %ecx, %eax
+; AVX1OR2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1OR2-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
+; AVX1OR2-NEXT: vpinsrw $2, %edx, %xmm1, %xmm1
+; AVX1OR2-NEXT: vpextrw $3, %xmm0, %eax
+; AVX1OR2-NEXT: imull $12375, %eax, %ecx # imm = 0x3057
+; AVX1OR2-NEXT: shrl $26, %ecx
+; AVX1OR2-NEXT: imull $5423, %ecx, %ecx # imm = 0x152F
+; AVX1OR2-NEXT: subl %ecx, %eax
+; AVX1OR2-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0
+; AVX1OR2-NEXT: retq
+;
+; AVX512-LABEL: dont_fold_urem_one:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpextrw $2, %xmm0, %eax
+; AVX512-NEXT: imull $25645, %eax, %ecx # imm = 0x642D
+; AVX512-NEXT: shrl $16, %ecx
+; AVX512-NEXT: movl %eax, %edx
+; AVX512-NEXT: subl %ecx, %edx
+; AVX512-NEXT: movzwl %dx, %edx
+; AVX512-NEXT: shrl %edx
+; AVX512-NEXT: addl %ecx, %edx
+; AVX512-NEXT: shrl $4, %edx
+; AVX512-NEXT: leal (%rdx,%rdx,2), %ecx
+; AVX512-NEXT: shll $3, %ecx
+; AVX512-NEXT: subl %ecx, %edx
+; AVX512-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX512-NEXT: addl %eax, %edx
+; AVX512-NEXT: imull $51307, %ecx, %eax # imm = 0xC86B
+; AVX512-NEXT: shrl $25, %eax
+; AVX512-NEXT: imull $654, %eax, %eax # imm = 0x28E
+; AVX512-NEXT: subl %eax, %ecx
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1
+; AVX512-NEXT: vpinsrw $2, %edx, %xmm1, %xmm1
+; AVX512-NEXT: vpextrw $3, %xmm0, %eax
+; AVX512-NEXT: imull $12375, %eax, %ecx # imm = 0x3057
+; AVX512-NEXT: shrl $26, %ecx
+; AVX512-NEXT: imull $5423, %ecx, %ecx # imm = 0x152F
+; AVX512-NEXT: subl %ecx, %eax
+; AVX512-NEXT: vpinsrw $3, %eax, %xmm1, %xmm0
+; AVX512-NEXT: retq
%1 = urem <4 x i16> %x, <i16 1, i16 654, i16 23, i16 5423>
ret <4 x i16> %1
}
@@ -267,49 +347,96 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) {
-; CHECK-LABEL: dont_fold_urem_i16_smax:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
+; SSE-LABEL: dont_fold_urem_i16_smax:
+; SSE: # %bb.0:
+; SSE-NEXT: retq
+;
+; AVX-LABEL: dont_fold_urem_i16_smax:
+; AVX: # %bb.0:
+; AVX-NEXT: retq
%1 = urem <4 x i16> %x, <i16 1, i16 65536, i16 23, i16 5423>
ret <4 x i16> %1
}
; Don't fold i64 urem.
define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) {
-; SSE-LABEL: dont_fold_urem_i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movq %xmm1, %rcx
-; SSE-NEXT: movabsq $7218291159277650633, %rdx # imm = 0x642C8590B21642C9
-; SSE-NEXT: movq %rcx, %rax
-; SSE-NEXT: mulq %rdx
-; SSE-NEXT: movq %rcx, %rax
-; SSE-NEXT: subq %rdx, %rax
-; SSE-NEXT: shrq %rax
-; SSE-NEXT: addq %rdx, %rax
-; SSE-NEXT: shrq $4, %rax
-; SSE-NEXT: leaq (%rax,%rax,2), %rdx
-; SSE-NEXT: shlq $3, %rdx
-; SSE-NEXT: subq %rdx, %rax
-; SSE-NEXT: addq %rcx, %rax
-; SSE-NEXT: movq %rax, %xmm2
-; SSE-NEXT: pextrq $1, %xmm1, %rcx
-; SSE-NEXT: movabsq $-4513890722074972339, %rdx # imm = 0xC15B704DCBCA2F4D
-; SSE-NEXT: movq %rcx, %rax
-; SSE-NEXT: mulq %rdx
-; SSE-NEXT: shrq $12, %rdx
-; SSE-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F
-; SSE-NEXT: subq %rax, %rcx
-; SSE-NEXT: movq %rcx, %xmm1
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE-NEXT: pextrq $1, %xmm0, %rcx
-; SSE-NEXT: movq %rcx, %rax
-; SSE-NEXT: shrq %rax
-; SSE-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5
-; SSE-NEXT: mulq %rdx
-; SSE-NEXT: shrq $7, %rdx
-; SSE-NEXT: imulq $654, %rdx, %rax # imm = 0x28E
-; SSE-NEXT: subq %rax, %rcx
-; SSE-NEXT: movq %rcx, %xmm0
-; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: dont_fold_urem_i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: movq %xmm1, %rcx
+; SSE2-NEXT: movabsq $7218291159277650633, %rdx # imm = 0x642C8590B21642C9
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: mulq %rdx
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: subq %rdx, %rax
+; SSE2-NEXT: shrq %rax
+; SSE2-NEXT: addq %rdx, %rax
+; SSE2-NEXT: shrq $4, %rax
+; SSE2-NEXT: leaq (%rax,%rax,2), %rdx
+; SSE2-NEXT: shlq $3, %rdx
+; SSE2-NEXT: subq %rdx, %rax
+; SSE2-NEXT: addq %rcx, %rax
+; SSE2-NEXT: movq %rax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: movabsq $-4513890722074972339, %rdx # imm = 0xC15B704DCBCA2F4D
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: mulq %rdx
+; SSE2-NEXT: shrq $12, %rdx
+; SSE2-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F
+; SSE2-NEXT: subq %rax, %rcx
+; SSE2-NEXT: movq %rcx, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rcx
+; SSE2-NEXT: movq %rcx, %rax
+; SSE2-NEXT: shrq %rax
+; SSE2-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5
+; SSE2-NEXT: mulq %rdx
+; SSE2-NEXT: shrq $7, %rdx
+; SSE2-NEXT: imulq $654, %rdx, %rax # imm = 0x28E
+; SSE2-NEXT: subq %rax, %rcx
+; SSE2-NEXT: movq %rcx, %xmm0
+; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: dont_fold_urem_i64:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movq %xmm1, %rcx
+; SSE4-NEXT: movabsq $7218291159277650633, %rdx # imm = 0x642C8590B21642C9
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: mulq %rdx
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: subq %rdx, %rax
+; SSE4-NEXT: shrq %rax
+; SSE4-NEXT: addq %rdx, %rax
+; SSE4-NEXT: shrq $4, %rax
+; SSE4-NEXT: leaq (%rax,%rax,2), %rdx
+; SSE4-NEXT: shlq $3, %rdx
+; SSE4-NEXT: subq %rdx, %rax
+; SSE4-NEXT: addq %rcx, %rax
+; SSE4-NEXT: movq %rax, %xmm2
+; SSE4-NEXT: pextrq $1, %xmm1, %rcx
+; SSE4-NEXT: movabsq $-4513890722074972339, %rdx # imm = 0xC15B704DCBCA2F4D
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: mulq %rdx
+; SSE4-NEXT: shrq $12, %rdx
+; SSE4-NEXT: imulq $5423, %rdx, %rax # imm = 0x152F
+; SSE4-NEXT: subq %rax, %rcx
+; SSE4-NEXT: movq %rcx, %xmm1
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE4-NEXT: pextrq $1, %xmm0, %rcx
+; SSE4-NEXT: movq %rcx, %rax
+; SSE4-NEXT: shrq %rax
+; SSE4-NEXT: movabsq $7220743857598845893, %rdx # imm = 0x64353C48064353C5
+; SSE4-NEXT: mulq %rdx
+; SSE4-NEXT: shrq $7, %rdx
+; SSE4-NEXT: imulq $654, %rdx, %rax # imm = 0x28E
+; SSE4-NEXT: subq %rax, %rcx
+; SSE4-NEXT: movq %rcx, %xmm0
+; SSE4-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; SSE4-NEXT: movdqa %xmm2, %xmm1
+; SSE4-NEXT: retq
;
; AVX1-LABEL: dont_fold_urem_i64:
; AVX1: # %bb.0:
@@ -388,6 +515,43 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) {
; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: dont_fold_urem_i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vmovq %xmm1, %rdx
+; AVX512-NEXT: movabsq $7218291159277650633, %rax # imm = 0x642C8590B21642C9
+; AVX512-NEXT: mulxq %rax, %rax, %rax
+; AVX512-NEXT: movq %rdx, %rcx
+; AVX512-NEXT: subq %rax, %rcx
+; AVX512-NEXT: shrq %rcx
+; AVX512-NEXT: addq %rax, %rcx
+; AVX512-NEXT: shrq $4, %rcx
+; AVX512-NEXT: leaq (%rcx,%rcx,2), %rax
+; AVX512-NEXT: shlq $3, %rax
+; AVX512-NEXT: subq %rax, %rcx
+; AVX512-NEXT: addq %rdx, %rcx
+; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX512-NEXT: movabsq $-4513890722074972339, %rax # imm = 0xC15B704DCBCA2F4D
+; AVX512-NEXT: mulxq %rax, %rax, %rax
+; AVX512-NEXT: vmovq %rcx, %xmm1
+; AVX512-NEXT: shrq $12, %rax
+; AVX512-NEXT: imulq $5423, %rax, %rax # imm = 0x152F
+; AVX512-NEXT: subq %rax, %rdx
+; AVX512-NEXT: vmovq %rdx, %xmm2
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: movq %rax, %rdx
+; AVX512-NEXT: shrq %rdx
+; AVX512-NEXT: movabsq $7220743857598845893, %rcx # imm = 0x64353C48064353C5
+; AVX512-NEXT: mulxq %rcx, %rcx, %rcx
+; AVX512-NEXT: shrq $7, %rcx
+; AVX512-NEXT: imulq $654, %rcx, %rcx # imm = 0x28E
+; AVX512-NEXT: subq %rcx, %rax
+; AVX512-NEXT: vmovq %rax, %xmm0
+; AVX512-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%1 = urem <4 x i64> %x, <i64 1, i64 654, i64 23, i64 5423>
ret <4 x i64> %1
}
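The unsigned file exercises the same trick without the sign fixup: for the
divisor-95 lanes the whole division collapses to one multiply and shift,
since 44151 = ceil(2^22 / 95) gives an exact quotient for every 16-bit
input. A matching C sketch (again with constants read off the CHECK lines,
helper name illustrative):

    #include <stdint.h>

    /* Illustrative only: unsigned remainder by 95 via a widening multiply
       and a 22-bit shift, then subtracting q * 95. */
    static uint16_t urem95(uint16_t x) {
        uint32_t q = ((uint32_t)x * 44151) >> 22;  /* imull $44151; shrl $22 */
        return (uint16_t)(x - q * 95);             /* imull $95; subl */
    }

As the NOTE lines state, the assertions themselves are autogenerated; after
editing the RUN lines they are refreshed with utils/update_llc_test_checks.py
rather than written by hand.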