[llvm] e28d582 - [X86] Add some very basic test coverage for the few @llvm.vp.* intrinsics that correctly expand

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 3 05:25:23 PST 2023


Author: Simon Pilgrim
Date: 2023-02-03T13:25:10Z
New Revision: e28d5820a0453e15b6213bde75298aa359a6d661

URL: https://github.com/llvm/llvm-project/commit/e28d5820a0453e15b6213bde75298aa359a6d661
DIFF: https://github.com/llvm/llvm-project/commit/e28d5820a0453e15b6213bde75298aa359a6d661.diff

LOG: [X86] Add some very basic test coverage for the few @llvm.vp.* intrinsics that correctly expand

Inspired by Issue #60464 - we can add/enable additional intrinsics when we add expansion support
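For context, the expansion these tests exercise rewrites a @llvm.vp.* call into its
unpredicated equivalent. A minimal sketch (illustrative only, not part of this patch;
the names %x, %y and %evl are made up) of what the pre-ISel expansion does for
@llvm.vp.fadd with an all-ones mask - legal because lanes at or beyond %evl are
poison and may be computed freely:

  ; before expansion
  %res = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %x, <4 x float> %y, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %evl)

  ; after expansion - the mask and EVL operands are discarded
  %res = fadd <4 x float> %x, %y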

Added: 
    llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll
    llvm/test/CodeGen/X86/expand-vp-int-intrinsics.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll b/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll
new file mode 100644
index 0000000000000..1b5a8f5221b9c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll
@@ -0,0 +1,202 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown                  | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx      | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2     | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
+
+define void @vp_fadd_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_fadd_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    addps %xmm1, %xmm0
+; SSE-NEXT:    movaps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_fadd_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x float> %res, ptr %out
+  ret void
+}
+declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
+
+define void @vp_fsub_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_fsub_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    subps %xmm1, %xmm0
+; SSE-NEXT:    movaps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_fsub_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vsubps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x float> %res, ptr %out
+  ret void
+}
+declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
+
+define void @vp_fmul_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_fmul_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    mulps %xmm1, %xmm0
+; SSE-NEXT:    movaps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_fmul_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x float> %res, ptr %out
+  ret void
+}
+declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
+
+define void @vp_fdiv_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_fdiv_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    divps %xmm1, %xmm0
+; SSE-NEXT:    movaps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_fdiv_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vdivps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x float> %res, ptr %out
+  ret void
+}
+declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
+
+define void @vp_frem_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_frem_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pushq %rbx
+; SSE-NEXT:    subq $64, %rsp
+; SSE-NEXT:    movq %rdi, %rbx
+; SSE-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; SSE-NEXT:    callq fmodf@PLT
+; SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT:    callq fmodf@PLT
+; SSE-NEXT:    unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    callq fmodf@PLT
+; SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE-NEXT:    callq fmodf@PLT
+; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT:    unpcklpd (%rsp), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT:    # xmm1 = xmm1[0],mem[0]
+; SSE-NEXT:    movaps %xmm1, (%rbx)
+; SSE-NEXT:    addq $64, %rsp
+; SSE-NEXT:    popq %rbx
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_frem_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    subq $48, %rsp
+; AVX-NEXT:    movq %rdi, %rbx
+; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    callq fmodf@PLT
+; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = mem[1,1,3,3]
+; AVX-NEXT:    vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm1 = mem[1,1,3,3]
+; AVX-NEXT:    callq fmodf@PLT
+; AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = mem[1,0]
+; AVX-NEXT:    vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm1 = mem[1,0]
+; AVX-NEXT:    callq fmodf@PLT
+; AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm0 = mem[3,3,3,3]
+; AVX-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX-NEXT:    # xmm1 = mem[3,3,3,3]
+; AVX-NEXT:    callq fmodf@PLT
+; AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX-NEXT:    vmovaps %xmm0, (%rbx)
+; AVX-NEXT:    addq $48, %rsp
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    retq
+  %res = call <4 x float> @llvm.vp.frem.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x float> %res, ptr %out
+  ret void
+}
+declare <4 x float> @llvm.vp.frem.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
+
+; TODO: @llvm.vp.fabs.v4f32
+;define void @vp_fabs_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x float> @llvm.vp.fabs.v4f32(<4 x float> %a0, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x float> %res, ptr %out
+;  ret void
+;}
+;declare <4 x float> @llvm.vp.fabs.v4f32(<4 x float>, <4 x i1>, i32)
+
+; TODO: @llvm.vp.sqrt.v4f32
+;define void @vp_sqrt_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x float> @llvm.vp.sqrt.v4f32(<4 x float> %a0, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x float> %res, ptr %out
+;  ret void
+;}
+;declare <4 x float> @llvm.vp.sqrt.v4f32(<4 x float>, <4 x i1>, i32)
+
+; TODO: @llvm.vp.fneg.v4f32
+;define void @vp_fneg_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %a0, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x float> %res, ptr %out
+;  ret void
+;}
+;declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32)
+
+; TODO: @llvm.vp.fma.v4f32
+;define void @vp_fma_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x float> %res, ptr %out
+;  ret void
+;}
+;declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32)
+
+; TODO: @llvm.vp.fmuladd.v4f32
+;define void @vp_fmuladd_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x float> %res, ptr %out
+;  ret void
+;}
+;declare <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32)
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX1: {{.*}}
+; AVX2: {{.*}}
+; AVX512: {{.*}}

diff --git a/llvm/test/CodeGen/X86/expand-vp-int-intrinsics.ll b/llvm/test/CodeGen/X86/expand-vp-int-intrinsics.ll
new file mode 100644
index 0000000000000..db52dd6b47b46
--- /dev/null
+++ b/llvm/test/CodeGen/X86/expand-vp-int-intrinsics.ll
@@ -0,0 +1,883 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown                  | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx      | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2     | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
+
+define void @vp_add_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_add_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    movdqa %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_add_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_sub_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_sub_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psubd %xmm1, %xmm0
+; SSE-NEXT:    movdqa %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_sub_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_mul_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_mul_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE-NEXT:    pmuludq %xmm1, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE-NEXT:    pmuludq %xmm2, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT:    movdqa %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_mul_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_sdiv_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_sdiv_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %esi, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    paddd %xmm2, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    psubd %xmm2, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; SSE-NEXT:    movd %xmm2, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; SSE-NEXT:    movd %xmm2, %eax
+; SSE-NEXT:    cltd
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movd %eax, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; SSE-NEXT:    movd %xmm3, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; SSE-NEXT:    movd %xmm3, %eax
+; SSE-NEXT:    cltd
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movd %eax, %xmm3
+; SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT:    movd %xmm1, %ecx
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    cltd
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movd %eax, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE-NEXT:    movd %xmm1, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    cltd
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT:    movdqa %xmm2, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vp_sdiv_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd %esi, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
+; AVX1-NEXT:    vpmaxud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vextractps $1, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX1-NEXT:    cltd
+; AVX1-NEXT:    idivl %ecx
+; AVX1-NEXT:    movl %eax, %ecx
+; AVX1-NEXT:    vmovd %xmm1, %esi
+; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    cltd
+; AVX1-NEXT:    idivl %esi
+; AVX1-NEXT:    vmovd %eax, %xmm2
+; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX1-NEXT:    cltd
+; AVX1-NEXT:    idivl %ecx
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX1-NEXT:    cltd
+; AVX1-NEXT:    idivl %ecx
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vp_sdiv_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd %esi, %xmm2
+; AVX2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
+; AVX2-NEXT:    vpmaxud %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-NEXT:    vblendvps %xmm2, %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vextractps $1, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-NEXT:    cltd
+; AVX2-NEXT:    idivl %ecx
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    vmovd %xmm1, %esi
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    cltd
+; AVX2-NEXT:    idivl %esi
+; AVX2-NEXT:    vmovd %eax, %xmm2
+; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX2-NEXT:    cltd
+; AVX2-NEXT:    idivl %ecx
+; AVX2-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX2-NEXT:    cltd
+; AVX2-NEXT:    idivl %ecx
+; AVX2-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vp_sdiv_v4i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastd %esi, %xmm2
+; AVX512-NEXT:    vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
+; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
+; AVX512-NEXT:    vmovdqa32 %xmm1, %xmm2 {%k1}
+; AVX512-NEXT:    vpextrd $1, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX512-NEXT:    cltd
+; AVX512-NEXT:    idivl %ecx
+; AVX512-NEXT:    movl %eax, %ecx
+; AVX512-NEXT:    vmovd %xmm2, %esi
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    cltd
+; AVX512-NEXT:    idivl %esi
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrd $2, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX512-NEXT:    cltd
+; AVX512-NEXT:    idivl %ecx
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrd $3, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX512-NEXT:    cltd
+; AVX512-NEXT:    idivl %ecx
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX512-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_udiv_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_udiv_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %esi, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    paddd %xmm2, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    psubd %xmm2, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; SSE-NEXT:    movd %xmm2, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; SSE-NEXT:    movd %xmm2, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movd %eax, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; SSE-NEXT:    movd %xmm3, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; SSE-NEXT:    movd %xmm3, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movd %eax, %xmm3
+; SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT:    movd %xmm1, %ecx
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movd %eax, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE-NEXT:    movd %xmm1, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT:    movdqa %xmm2, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vp_udiv_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd %esi, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
+; AVX1-NEXT:    vpmaxud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vextractps $1, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    divl %ecx
+; AVX1-NEXT:    movl %eax, %ecx
+; AVX1-NEXT:    vmovd %xmm1, %esi
+; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    divl %esi
+; AVX1-NEXT:    vmovd %eax, %xmm2
+; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    divl %ecx
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    divl %ecx
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vp_udiv_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd %esi, %xmm2
+; AVX2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
+; AVX2-NEXT:    vpmaxud %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-NEXT:    vblendvps %xmm2, %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vextractps $1, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    divl %ecx
+; AVX2-NEXT:    movl %eax, %ecx
+; AVX2-NEXT:    vmovd %xmm1, %esi
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    divl %esi
+; AVX2-NEXT:    vmovd %eax, %xmm2
+; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    divl %ecx
+; AVX2-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    divl %ecx
+; AVX2-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vp_udiv_v4i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastd %esi, %xmm2
+; AVX512-NEXT:    vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
+; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
+; AVX512-NEXT:    vmovdqa32 %xmm1, %xmm2 {%k1}
+; AVX512-NEXT:    vpextrd $1, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    divl %ecx
+; AVX512-NEXT:    movl %eax, %ecx
+; AVX512-NEXT:    vmovd %xmm2, %esi
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    divl %esi
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrd $2, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    divl %ecx
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrd $3, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    divl %ecx
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX512-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_srem_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_srem_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %esi, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    paddd %xmm2, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    psubd %xmm2, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; SSE-NEXT:    movd %xmm2, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; SSE-NEXT:    movd %xmm2, %eax
+; SSE-NEXT:    cltd
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movd %edx, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; SSE-NEXT:    movd %xmm3, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; SSE-NEXT:    movd %xmm3, %eax
+; SSE-NEXT:    cltd
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movd %edx, %xmm3
+; SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT:    movd %xmm1, %ecx
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    cltd
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movd %edx, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE-NEXT:    movd %xmm1, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    cltd
+; SSE-NEXT:    idivl %ecx
+; SSE-NEXT:    movd %edx, %xmm0
+; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT:    movdqa %xmm2, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vp_srem_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd %esi, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
+; AVX1-NEXT:    vpmaxud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vextractps $1, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX1-NEXT:    cltd
+; AVX1-NEXT:    idivl %ecx
+; AVX1-NEXT:    movl %edx, %ecx
+; AVX1-NEXT:    vmovd %xmm1, %esi
+; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    cltd
+; AVX1-NEXT:    idivl %esi
+; AVX1-NEXT:    vmovd %edx, %xmm2
+; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX1-NEXT:    cltd
+; AVX1-NEXT:    idivl %ecx
+; AVX1-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
+; AVX1-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX1-NEXT:    cltd
+; AVX1-NEXT:    idivl %ecx
+; AVX1-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vp_srem_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd %esi, %xmm2
+; AVX2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
+; AVX2-NEXT:    vpmaxud %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-NEXT:    vblendvps %xmm2, %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vextractps $1, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-NEXT:    cltd
+; AVX2-NEXT:    idivl %ecx
+; AVX2-NEXT:    movl %edx, %ecx
+; AVX2-NEXT:    vmovd %xmm1, %esi
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    cltd
+; AVX2-NEXT:    idivl %esi
+; AVX2-NEXT:    vmovd %edx, %xmm2
+; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX2-NEXT:    cltd
+; AVX2-NEXT:    idivl %ecx
+; AVX2-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX2-NEXT:    cltd
+; AVX2-NEXT:    idivl %ecx
+; AVX2-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vp_srem_v4i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastd %esi, %xmm2
+; AVX512-NEXT:    vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
+; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
+; AVX512-NEXT:    vmovdqa32 %xmm1, %xmm2 {%k1}
+; AVX512-NEXT:    vpextrd $1, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX512-NEXT:    cltd
+; AVX512-NEXT:    idivl %ecx
+; AVX512-NEXT:    movl %edx, %ecx
+; AVX512-NEXT:    vmovd %xmm2, %esi
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    cltd
+; AVX512-NEXT:    idivl %esi
+; AVX512-NEXT:    vmovd %edx, %xmm1
+; AVX512-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrd $2, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX512-NEXT:    cltd
+; AVX512-NEXT:    idivl %ecx
+; AVX512-NEXT:    vpinsrd $2, %edx, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrd $3, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX512-NEXT:    cltd
+; AVX512-NEXT:    idivl %ecx
+; AVX512-NEXT:    vpinsrd $3, %edx, %xmm1, %xmm0
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX512-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_urem_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_urem_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movd %esi, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    pand %xmm2, %xmm1
+; SSE-NEXT:    paddd %xmm2, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    psubd %xmm2, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; SSE-NEXT:    movd %xmm2, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; SSE-NEXT:    movd %xmm2, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movd %edx, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; SSE-NEXT:    movd %xmm3, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; SSE-NEXT:    movd %xmm3, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movd %edx, %xmm3
+; SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT:    movd %xmm1, %ecx
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movd %edx, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE-NEXT:    movd %xmm1, %ecx
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE-NEXT:    movd %xmm0, %eax
+; SSE-NEXT:    xorl %edx, %edx
+; SSE-NEXT:    divl %ecx
+; SSE-NEXT:    movd %edx, %xmm0
+; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT:    movdqa %xmm2, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vp_urem_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovd %esi, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
+; AVX1-NEXT:    vpmaxud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vextractps $1, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    divl %ecx
+; AVX1-NEXT:    movl %edx, %ecx
+; AVX1-NEXT:    vmovd %xmm1, %esi
+; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    divl %esi
+; AVX1-NEXT:    vmovd %edx, %xmm2
+; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    divl %ecx
+; AVX1-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
+; AVX1-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    divl %ecx
+; AVX1-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vp_urem_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovd %esi, %xmm2
+; AVX2-NEXT:    vpbroadcastd %xmm2, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
+; AVX2-NEXT:    vpmaxud %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-NEXT:    vblendvps %xmm2, %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vextractps $1, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    divl %ecx
+; AVX2-NEXT:    movl %edx, %ecx
+; AVX2-NEXT:    vmovd %xmm1, %esi
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    divl %esi
+; AVX2-NEXT:    vmovd %edx, %xmm2
+; AVX2-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    divl %ecx
+; AVX2-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
+; AVX2-NEXT:    vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX2-NEXT:    xorl %edx, %edx
+; AVX2-NEXT:    divl %ecx
+; AVX2-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vp_urem_v4i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastd %esi, %xmm2
+; AVX512-NEXT:    vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
+; AVX512-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
+; AVX512-NEXT:    vmovdqa32 %xmm1, %xmm2 {%k1}
+; AVX512-NEXT:    vpextrd $1, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    divl %ecx
+; AVX512-NEXT:    movl %edx, %ecx
+; AVX512-NEXT:    vmovd %xmm2, %esi
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    divl %esi
+; AVX512-NEXT:    vmovd %edx, %xmm1
+; AVX512-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrd $2, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    divl %ecx
+; AVX512-NEXT:    vpinsrd $2, %edx, %xmm1, %xmm1
+; AVX512-NEXT:    vpextrd $3, %xmm2, %ecx
+; AVX512-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX512-NEXT:    xorl %edx, %edx
+; AVX512-NEXT:    divl %ecx
+; AVX512-NEXT:    vpinsrd $3, %edx, %xmm1, %xmm0
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX512-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_ashr_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_ashr_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    psrad %xmm2, %xmm3
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm4
+; SSE-NEXT:    psrad %xmm2, %xmm4
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    psrad %xmm2, %xmm3
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT:    psrad %xmm1, %xmm0
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm3[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,3]
+; SSE-NEXT:    movaps %xmm4, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vp_ashr_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrad %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpsrlq $32, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrad %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT:    vpsrad %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpsrad %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vp_ashr_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vp_ashr_v4i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX512-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_lshr_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_lshr_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    psrld %xmm2, %xmm3
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm4
+; SSE-NEXT:    psrld %xmm2, %xmm4
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    psrld %xmm2, %xmm3
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT:    psrld %xmm1, %xmm0
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm3[1]
+; SSE-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,3]
+; SSE-NEXT:    movaps %xmm4, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vp_lshr_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrld %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpsrlq $32, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-NEXT:    vpsrld %xmm3, %xmm0, %xmm3
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vp_lshr_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vp_lshr_v4i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX512-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_shl_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_shl_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pslld $23, %xmm1
+; SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT:    cvttps2dq %xmm1, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE-NEXT:    pmuludq %xmm1, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE-NEXT:    pmuludq %xmm2, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT:    movdqa %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vp_shl_v4i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vp_shl_v4i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: vp_shl_v4i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa %xmm0, (%rdi)
+; AVX512-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.shl.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_or_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_or_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    orps %xmm1, %xmm0
+; SSE-NEXT:    movaps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_or_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.or.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_and_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_and_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    andps %xmm1, %xmm0
+; SSE-NEXT:    movaps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_and_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.and.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @vp_xor_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+; SSE-LABEL: vp_xor_v4i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    xorps %xmm1, %xmm0
+; SSE-NEXT:    movaps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vp_xor_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %res = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+  store <4 x i32> %res, ptr %out
+  ret void
+}
+declare <4 x i32> @llvm.vp.xor.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+; TODO: llvm.vp.abs.v4i32
+;define void @vp_abs_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %a0, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x i32> %res, ptr %out
+;  ret void
+;}
+;declare <4 x i32> @llvm.vp.abs.v4i32(<4 x i32>, <4 x i1>, i32)
+
+; TODO: llvm.vp.smax.v4i32
+;define void @vp_smax_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x i32> %res, ptr %out
+;  ret void
+;}
+;declare <4 x i32> @llvm.vp.smax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+; TODO: llvm.vp.smin.v4i32
+;define void @vp_smin_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x i32> %res, ptr %out
+;  ret void
+;}
+;declare <4 x i32> @llvm.vp.smin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+; TODO: llvm.vp.umax.v4i32
+;define void @vp_umax_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x i32> %res, ptr %out
+;  ret void
+;}
+;declare <4 x i32> @llvm.vp.umax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+; TODO: llvm.vp.umin.v4i32
+;define void @vp_umin_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
+;  %res = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
+;  store <4 x i32> %res, ptr %out
+;  ret void
+;}
+;declare <4 x i32> @llvm.vp.umin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
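As the autogenerated NOTE headers indicate, the CHECK lines in both files come from
utils/update_llc_test_checks.py; assuming a built llc (point the script at it with
--llc-binary if it is not on PATH), they can be regenerated with something like:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll llvm/test/CodeGen/X86/expand-vp-int-intrinsics.ll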
