[llvm] [RFC][IR] Add llvm.masked.{udiv, sdiv, urem, srem} intrinsics (PR #189705)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 1 09:51:42 PDT 2026
================
@@ -0,0 +1,756 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple x86_64 -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE2
+; RUN: llc -mtriple x86_64 -mattr=+avx512 < %s | FileCheck %s --check-prefix=AVX512
+
+; Legal
+define <4 x i32> @udiv_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i1> %m) {
+; SSE2-LABEL: udiv_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pslld $31, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: paddd %xmm2, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; SSE2-NEXT: movd %xmm3, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX512-LABEL: udiv_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pslld $31, %xmm2
+; AVX512-NEXT: psrad $31, %xmm2
+; AVX512-NEXT: pand %xmm2, %xmm1
+; AVX512-NEXT: paddd %xmm2, %xmm1
+; AVX512-NEXT: pcmpeqd %xmm2, %xmm2
+; AVX512-NEXT: psubd %xmm2, %xmm1
+; AVX512-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; AVX512-NEXT: movd %xmm2, %ecx
+; AVX512-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; AVX512-NEXT: movd %xmm2, %eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divl %ecx
+; AVX512-NEXT: movd %eax, %xmm2
+; AVX512-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; AVX512-NEXT: movd %xmm3, %ecx
+; AVX512-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; AVX512-NEXT: movd %xmm3, %eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divl %ecx
+; AVX512-NEXT: movd %eax, %xmm3
+; AVX512-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX512-NEXT: movd %xmm1, %ecx
+; AVX512-NEXT: movd %xmm0, %eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divl %ecx
+; AVX512-NEXT: movd %eax, %xmm2
+; AVX512-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; AVX512-NEXT: movd %xmm1, %ecx
+; AVX512-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; AVX512-NEXT: movd %xmm0, %eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divl %ecx
+; AVX512-NEXT: movd %eax, %xmm0
+; AVX512-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX512-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX512-NEXT: movdqa %xmm2, %xmm0
+; AVX512-NEXT: retq
+ %res = call <4 x i32> @llvm.masked.udiv(<4 x i32> %x, <4 x i32> %y, <4 x i1> %m)
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @udiv_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i1> %m) {
+; SSE2-LABEL: udiv_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; SSE2-NEXT: pslld $31, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divq %rcx
+; SSE2-NEXT: movq %rax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divq %rcx
+; SSE2-NEXT: movq %rax, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX512-LABEL: udiv_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX512-NEXT: pslld $31, %xmm2
+; AVX512-NEXT: psrad $31, %xmm2
+; AVX512-NEXT: pand %xmm2, %xmm1
+; AVX512-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; AVX512-NEXT: por %xmm1, %xmm2
+; AVX512-NEXT: movq %xmm2, %rcx
+; AVX512-NEXT: movq %xmm0, %rax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divq %rcx
+; AVX512-NEXT: movq %rax, %xmm1
+; AVX512-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX512-NEXT: movq %xmm2, %rcx
+; AVX512-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX512-NEXT: movq %xmm0, %rax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divq %rcx
+; AVX512-NEXT: movq %rax, %xmm0
+; AVX512-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX512-NEXT: movdqa %xmm1, %xmm0
+; AVX512-NEXT: retq
+ %res = call <2 x i64> @llvm.masked.udiv(<2 x i64> %x, <2 x i64> %y, <2 x i1> %m)
+ ret <2 x i64> %res
+}
+
+; Splitting
+define <4 x i64> @udiv_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i1> %m) {
+; SSE2-LABEL: udiv_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,2,3,3]
+; SSE2-NEXT: pslld $31, %xmm6
+; SSE2-NEXT: psrad $31, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,1,1]
+; SSE2-NEXT: pslld $31, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [1,1]
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: pandn %xmm7, %xmm4
+; SSE2-NEXT: por %xmm2, %xmm4
+; SSE2-NEXT: movq %xmm4, %rcx
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divq %rcx
+; SSE2-NEXT: movq %rax, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divq %rcx
+; SSE2-NEXT: movq %rax, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: pand %xmm6, %xmm3
+; SSE2-NEXT: pandn %xmm7, %xmm6
+; SSE2-NEXT: por %xmm3, %xmm6
+; SSE2-NEXT: movq %xmm6, %rcx
+; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divq %rcx
+; SSE2-NEXT: movq %rax, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divq %rcx
+; SSE2-NEXT: movq %rax, %xmm1
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; AVX512-LABEL: udiv_v4i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movdqa %xmm0, %xmm5
+; AVX512-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,2,3,3]
+; AVX512-NEXT: pslld $31, %xmm6
+; AVX512-NEXT: psrad $31, %xmm6
+; AVX512-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,1,1]
+; AVX512-NEXT: pslld $31, %xmm4
+; AVX512-NEXT: psrad $31, %xmm4
+; AVX512-NEXT: movdqa {{.*#+}} xmm7 = [1,1]
+; AVX512-NEXT: pand %xmm4, %xmm2
+; AVX512-NEXT: pandn %xmm7, %xmm4
+; AVX512-NEXT: por %xmm2, %xmm4
+; AVX512-NEXT: movq %xmm4, %rcx
+; AVX512-NEXT: movq %xmm0, %rax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divq %rcx
+; AVX512-NEXT: movq %rax, %xmm0
+; AVX512-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
+; AVX512-NEXT: movq %xmm2, %rcx
+; AVX512-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; AVX512-NEXT: movq %xmm2, %rax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divq %rcx
+; AVX512-NEXT: movq %rax, %xmm2
+; AVX512-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX512-NEXT: pand %xmm6, %xmm3
+; AVX512-NEXT: pandn %xmm7, %xmm6
+; AVX512-NEXT: por %xmm3, %xmm6
+; AVX512-NEXT: movq %xmm6, %rcx
+; AVX512-NEXT: movq %xmm1, %rax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divq %rcx
+; AVX512-NEXT: movq %rax, %xmm2
+; AVX512-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; AVX512-NEXT: movq %xmm3, %rcx
+; AVX512-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX512-NEXT: movq %xmm1, %rax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divq %rcx
+; AVX512-NEXT: movq %rax, %xmm1
+; AVX512-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; AVX512-NEXT: movdqa %xmm2, %xmm1
+; AVX512-NEXT: retq
+ %res = call <4 x i64> @llvm.masked.udiv(<4 x i64> %x, <4 x i64> %y, <4 x i1> %m)
+ ret <4 x i64> %res
+}
+
+; Widening
+define <2 x i32> @udiv_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i1> %m) {
+; SSE2-LABEL: udiv_v2i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: xorps %xmm3, %xmm3
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[2,3]
+; SSE2-NEXT: pslld $31, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: paddd %xmm2, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; SSE2-NEXT: movd %xmm3, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; SSE2-NEXT: movd %xmm3, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divl %ecx
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX512-LABEL: udiv_v2i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: xorps %xmm3, %xmm3
+; AVX512-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[2,3]
+; AVX512-NEXT: pslld $31, %xmm2
+; AVX512-NEXT: psrad $31, %xmm2
+; AVX512-NEXT: pand %xmm2, %xmm1
+; AVX512-NEXT: paddd %xmm2, %xmm1
+; AVX512-NEXT: pcmpeqd %xmm2, %xmm2
+; AVX512-NEXT: psubd %xmm2, %xmm1
+; AVX512-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; AVX512-NEXT: movd %xmm2, %ecx
+; AVX512-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; AVX512-NEXT: movd %xmm2, %eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divl %ecx
+; AVX512-NEXT: movd %eax, %xmm2
+; AVX512-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; AVX512-NEXT: movd %xmm3, %ecx
+; AVX512-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; AVX512-NEXT: movd %xmm3, %eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divl %ecx
+; AVX512-NEXT: movd %eax, %xmm3
+; AVX512-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX512-NEXT: movd %xmm1, %ecx
+; AVX512-NEXT: movd %xmm0, %eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divl %ecx
+; AVX512-NEXT: movd %eax, %xmm2
+; AVX512-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; AVX512-NEXT: movd %xmm1, %ecx
+; AVX512-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; AVX512-NEXT: movd %xmm0, %eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divl %ecx
+; AVX512-NEXT: movd %eax, %xmm0
+; AVX512-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX512-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX512-NEXT: movdqa %xmm2, %xmm0
+; AVX512-NEXT: retq
+ %res = call <2 x i32> @llvm.masked.udiv(<2 x i32> %x, <2 x i32> %y, <2 x i1> %m)
+ ret <2 x i32> %res
+}
+
+; Promotion
+define <4 x i16> @udiv_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i1> %m) {
+; SSE2-LABEL: udiv_v4i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
+; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: psllw $15, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: paddw %xmm2, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: psubw %xmm2, %xmm1
+; SSE2-NEXT: pextrw $7, %xmm1, %ecx
+; SSE2-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divw %cx
+; SSE2-NEXT: # kill: def $ax killed $ax def $eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: pextrw $6, %xmm1, %ecx
+; SSE2-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divw %cx
+; SSE2-NEXT: # kill: def $ax killed $ax def $eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: pextrw $5, %xmm1, %ecx
+; SSE2-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divw %cx
+; SSE2-NEXT: # kill: def $ax killed $ax def $eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: pextrw $4, %xmm1, %ecx
+; SSE2-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divw %cx
+; SSE2-NEXT: # kill: def $ax killed $ax def $eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: pextrw $3, %xmm1, %ecx
+; SSE2-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divw %cx
+; SSE2-NEXT: # kill: def $ax killed $ax def $eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: pextrw $2, %xmm1, %ecx
+; SSE2-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divw %cx
+; SSE2-NEXT: # kill: def $ax killed $ax def $eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: pextrw $1, %xmm1, %ecx
+; SSE2-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divw %cx
+; SSE2-NEXT: # kill: def $ax killed $ax def $eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divw %cx
+; SSE2-NEXT: # kill: def $ax killed $ax def $eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: retq
+;
+; AVX512-LABEL: udiv_v4i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; AVX512-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
+; AVX512-NEXT: psrldq {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: psllw $15, %xmm2
+; AVX512-NEXT: psraw $15, %xmm2
+; AVX512-NEXT: pand %xmm2, %xmm1
+; AVX512-NEXT: paddw %xmm2, %xmm1
+; AVX512-NEXT: pcmpeqd %xmm2, %xmm2
+; AVX512-NEXT: psubw %xmm2, %xmm1
+; AVX512-NEXT: pextrw $7, %xmm1, %ecx
+; AVX512-NEXT: pextrw $7, %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divw %cx
+; AVX512-NEXT: # kill: def $ax killed $ax def $eax
+; AVX512-NEXT: movd %eax, %xmm2
+; AVX512-NEXT: pextrw $6, %xmm1, %ecx
+; AVX512-NEXT: pextrw $6, %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divw %cx
+; AVX512-NEXT: # kill: def $ax killed $ax def $eax
+; AVX512-NEXT: movd %eax, %xmm3
+; AVX512-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512-NEXT: pextrw $5, %xmm1, %ecx
+; AVX512-NEXT: pextrw $5, %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divw %cx
+; AVX512-NEXT: # kill: def $ax killed $ax def $eax
+; AVX512-NEXT: movd %eax, %xmm4
+; AVX512-NEXT: pextrw $4, %xmm1, %ecx
+; AVX512-NEXT: pextrw $4, %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divw %cx
+; AVX512-NEXT: # kill: def $ax killed $ax def $eax
+; AVX512-NEXT: movd %eax, %xmm2
+; AVX512-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; AVX512-NEXT: pextrw $3, %xmm1, %ecx
+; AVX512-NEXT: pextrw $3, %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divw %cx
+; AVX512-NEXT: # kill: def $ax killed $ax def $eax
+; AVX512-NEXT: movd %eax, %xmm3
+; AVX512-NEXT: pextrw $2, %xmm1, %ecx
+; AVX512-NEXT: pextrw $2, %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divw %cx
+; AVX512-NEXT: # kill: def $ax killed $ax def $eax
+; AVX512-NEXT: movd %eax, %xmm4
+; AVX512-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512-NEXT: pextrw $1, %xmm1, %ecx
+; AVX512-NEXT: pextrw $1, %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divw %cx
+; AVX512-NEXT: # kill: def $ax killed $ax def $eax
+; AVX512-NEXT: movd %eax, %xmm3
+; AVX512-NEXT: movd %xmm1, %ecx
+; AVX512-NEXT: movd %xmm0, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divw %cx
+; AVX512-NEXT: # kill: def $ax killed $ax def $eax
+; AVX512-NEXT: movd %eax, %xmm0
+; AVX512-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX512-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; AVX512-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX512-NEXT: retq
+ %res = call <4 x i16> @llvm.masked.udiv(<4 x i16> %x, <4 x i16> %y, <4 x i1> %m)
+ ret <4 x i16> %res
+}
+
+; Scalarization
+define <1 x i64> @udiv_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i1> %m) {
+; SSE2-LABEL: udiv_v1i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %rdi, %rax
+; SSE2-NEXT: testb $1, %dl
+; SSE2-NEXT: movl $1, %ecx
+; SSE2-NEXT: cmovneq %rsi, %rcx
+; SSE2-NEXT: xorl %edx, %edx
+; SSE2-NEXT: divq %rcx
+; SSE2-NEXT: retq
+;
+; AVX512-LABEL: udiv_v1i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movq %rdi, %rax
+; AVX512-NEXT: testb $1, %dl
+; AVX512-NEXT: movl $1, %ecx
+; AVX512-NEXT: cmovneq %rsi, %rcx
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: divq %rcx
+; AVX512-NEXT: retq
+ %res = call <1 x i64> @llvm.masked.udiv(<1 x i64> %x, <1 x i64> %y, <1 x i1> %m)
+ ret <1 x i64> %res
+}
+
+; Expansion
+define <2 x i128> @udiv_v2i128(<2 x i128> %x, <2 x i128> %y, <2 x i1> %m) {
----------------
lukel97 wrote:
Done in 443a665faaf8bd0143948e4c55e671d176e04269
https://github.com/llvm/llvm-project/pull/189705