[llvm] r368184 - Recommit r368079 "[X86] Remove uses of the -x86-experimental-vector-widening-legalization flag from test/CodeGen/X86/"
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 7 09:33:38 PDT 2019
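
For context, the tests in the removed file reduce a vector to a scalar with the llvm.experimental.vector.reduce.mul.* intrinsics. A minimal standalone LLVM IR sketch of the pattern being exercised (illustrative only, with a hypothetical function name; not taken from the removed file):

    ; Multiply-reduce a <2 x i64> down to a single i64.
    declare i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64>)

    define i64 @mul_reduce_example(<2 x i64> %v) {
      %r = call i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64> %v)
      ret i64 %r
    }

Since SSE/AVX have no full 64-bit element multiply (only AVX512DQ provides vpmullq), the vXi64 CHECK lines below build each 64-bit product from 32-bit halves with pmuludq, psrlq/psllq, and paddq: a*b = lo(a)*lo(b) + ((lo(a)*hi(b) + hi(a)*lo(b)) << 32) (mod 2^64).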
Removed: llvm/trunk/test/CodeGen/X86/vector-reduce-mul-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-mul-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-mul-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-mul-widen.ll (removed)
@@ -1,3022 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL --check-prefix=AVX512BWVL
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL --check-prefix=AVX512DQVL
-
-;
-; vXi64
-;
-
-define i64 @test_v2i64(<2 x i64> %a0) {
-; SSE-LABEL: test_v2i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: pmuludq %xmm0, %xmm3
-; SSE-NEXT: paddq %xmm2, %xmm3
-; SSE-NEXT: psllq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: paddq %xmm3, %xmm0
-; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: retq
-;
-; AVX512BW-LABEL: test_v2i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BW-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v2i64:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vmovq %xmm0, %rax
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v2i64:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vmovq %xmm0, %rax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v2i64:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vmovq %xmm0, %rax
-; AVX512DQVL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v4i64(<4 x i64> %a0) {
-; SSE-LABEL: test_v4i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psrlq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm0, %xmm3
-; SSE-NEXT: paddq %xmm2, %xmm3
-; SSE-NEXT: psllq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: paddq %xmm3, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: pmuludq %xmm0, %xmm3
-; SSE-NEXT: paddq %xmm2, %xmm3
-; SSE-NEXT: psllq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: paddq %xmm3, %xmm0
-; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v4i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BW-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v4i64:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vmovq %xmm0, %rax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v4i64:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vmovq %xmm0, %rax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v4i64:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vmovq %xmm0, %rax
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v8i64(<8 x i64> %a0) {
-; SSE-LABEL: test_v8i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm3, %xmm5
-; SSE-NEXT: psrlq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm1, %xmm5
-; SSE-NEXT: paddq %xmm4, %xmm5
-; SSE-NEXT: psllq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm3, %xmm1
-; SSE-NEXT: paddq %xmm5, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrlq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm0, %xmm4
-; SSE-NEXT: paddq %xmm3, %xmm4
-; SSE-NEXT: psllq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm2, %xmm0
-; SSE-NEXT: paddq %xmm4, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psrlq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm0, %xmm3
-; SSE-NEXT: paddq %xmm2, %xmm3
-; SSE-NEXT: psllq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: paddq %xmm3, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: pmuludq %xmm0, %xmm3
-; SSE-NEXT: paddq %xmm2, %xmm3
-; SSE-NEXT: psllq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: paddq %xmm3, %xmm0
-; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v8i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm4
-; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm4
-; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm5
-; AVX1-NEXT: vpmuludq %xmm5, %xmm3, %xmm5
-; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
-; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
-; AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
-; AVX1-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
-; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm1
-; AVX1-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm3
-; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm2
-; AVX2-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
-; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
-; AVX2-NEXT: vpaddq %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
-; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v8i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
-; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
-; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
-; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
-; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
-; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BW-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v8i64:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
-; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
-; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
-; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
-; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vmovq %xmm0, %rax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v8i64:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vmovq %xmm0, %rax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v8i64:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vmovq %xmm0, %rax
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.mul.v8i64(<8 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v16i64(<16 x i64> %a0) {
-; SSE-LABEL: test_v16i64:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm2, %xmm8
-; SSE-NEXT: psrlq $32, %xmm8
-; SSE-NEXT: pmuludq %xmm6, %xmm8
-; SSE-NEXT: movdqa %xmm6, %xmm9
-; SSE-NEXT: psrlq $32, %xmm9
-; SSE-NEXT: pmuludq %xmm2, %xmm9
-; SSE-NEXT: paddq %xmm8, %xmm9
-; SSE-NEXT: psllq $32, %xmm9
-; SSE-NEXT: pmuludq %xmm6, %xmm2
-; SSE-NEXT: paddq %xmm9, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: psrlq $32, %xmm8
-; SSE-NEXT: pmuludq %xmm4, %xmm8
-; SSE-NEXT: movdqa %xmm4, %xmm6
-; SSE-NEXT: psrlq $32, %xmm6
-; SSE-NEXT: pmuludq %xmm0, %xmm6
-; SSE-NEXT: paddq %xmm8, %xmm6
-; SSE-NEXT: psllq $32, %xmm6
-; SSE-NEXT: pmuludq %xmm4, %xmm0
-; SSE-NEXT: paddq %xmm6, %xmm0
-; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm7, %xmm4
-; SSE-NEXT: movdqa %xmm7, %xmm6
-; SSE-NEXT: psrlq $32, %xmm6
-; SSE-NEXT: pmuludq %xmm3, %xmm6
-; SSE-NEXT: paddq %xmm4, %xmm6
-; SSE-NEXT: psllq $32, %xmm6
-; SSE-NEXT: pmuludq %xmm7, %xmm3
-; SSE-NEXT: paddq %xmm6, %xmm3
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm5, %xmm4
-; SSE-NEXT: movdqa %xmm5, %xmm6
-; SSE-NEXT: psrlq $32, %xmm6
-; SSE-NEXT: pmuludq %xmm1, %xmm6
-; SSE-NEXT: paddq %xmm4, %xmm6
-; SSE-NEXT: psllq $32, %xmm6
-; SSE-NEXT: pmuludq %xmm5, %xmm1
-; SSE-NEXT: paddq %xmm6, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm3, %xmm5
-; SSE-NEXT: psrlq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm1, %xmm5
-; SSE-NEXT: paddq %xmm4, %xmm5
-; SSE-NEXT: psllq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm3, %xmm1
-; SSE-NEXT: paddq %xmm5, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrlq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: psrlq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm0, %xmm4
-; SSE-NEXT: paddq %xmm3, %xmm4
-; SSE-NEXT: psllq $32, %xmm4
-; SSE-NEXT: pmuludq %xmm2, %xmm0
-; SSE-NEXT: paddq %xmm4, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: psrlq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm0, %xmm3
-; SSE-NEXT: paddq %xmm2, %xmm3
-; SSE-NEXT: psllq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: paddq %xmm3, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrlq $32, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: pmuludq %xmm0, %xmm3
-; SSE-NEXT: paddq %xmm2, %xmm3
-; SSE-NEXT: psllq $32, %xmm3
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: paddq %xmm3, %xmm0
-; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v16i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
-; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
-; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
-; AVX1-NEXT: vpmuludq %xmm5, %xmm1, %xmm5
-; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
-; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm5
-; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm5
-; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm5
-; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
-; AVX1-NEXT: vpmuludq %xmm6, %xmm0, %xmm6
-; AVX1-NEXT: vpaddq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vpsllq $32, %xmm5, %xmm5
-; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm6
-; AVX1-NEXT: vpaddq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm6
-; AVX1-NEXT: vpmuludq %xmm3, %xmm6, %xmm6
-; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm7
-; AVX1-NEXT: vpmuludq %xmm7, %xmm1, %xmm7
-; AVX1-NEXT: vpaddq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT: vpsllq $32, %xmm6, %xmm6
-; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
-; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
-; AVX1-NEXT: vpmuludq %xmm6, %xmm0, %xmm6
-; AVX1-NEXT: vpaddq %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
-; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm1
-; AVX1-NEXT: vpmuludq %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm2
-; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX1-NEXT: vpmuludq %xmm4, %xmm5, %xmm2
-; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm2
-; AVX1-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
-; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm3
-; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX1-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm4
-; AVX2-NEXT: vpmuludq %ymm3, %ymm4, %ymm4
-; AVX2-NEXT: vpsrlq $32, %ymm3, %ymm5
-; AVX2-NEXT: vpmuludq %ymm5, %ymm1, %ymm5
-; AVX2-NEXT: vpaddq %ymm4, %ymm5, %ymm4
-; AVX2-NEXT: vpsllq $32, %ymm4, %ymm4
-; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpaddq %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm3
-; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm3
-; AVX2-NEXT: vpsrlq $32, %ymm2, %ymm4
-; AVX2-NEXT: vpmuludq %ymm4, %ymm0, %ymm4
-; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vpsllq $32, %ymm3, %ymm3
-; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm2
-; AVX2-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
-; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
-; AVX2-NEXT: vpaddq %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vpsllq $32, %ymm2, %ymm2
-; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX2-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v16i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
-; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
-; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
-; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
-; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
-; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm2
-; AVX512BW-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
-; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm3
-; AVX512BW-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
-; AVX512BW-NEXT: vpaddq %zmm2, %zmm3, %zmm2
-; AVX512BW-NEXT: vpsllq $32, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BW-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BW-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v16i64:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
-; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
-; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
-; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
-; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpsrlq $32, %zmm0, %zmm2
-; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %zmm1, %zmm3
-; AVX512BWVL-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
-; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm3, %zmm2
-; AVX512BWVL-NEXT: vpsllq $32, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX512BWVL-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX512BWVL-NEXT: vpsllq $32, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vmovq %xmm0, %rax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v16i64:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vmovq %xmm0, %rax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v16i64:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vmovq %xmm0, %rax
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.mul.v16i64(<16 x i64> %a0)
- ret i64 %1
-}
-
-;
-; vXi32
-;
-
-define i32 @test_v2i32(<2 x i32> %a0) {
-; SSE2-LABEL: test_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v4i32(<4 x i32> %a0) {
-; SSE2-LABEL: test_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,1,1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pmuludq %xmm3, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pmulld %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v8i32(<8 x i32> %a0) {
-; SSE2-LABEL: test_v8i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,2,0,0]
-; SSE2-NEXT: pmuludq %xmm3, %xmm0
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pmulld %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v16i32(<16 x i32> %a0) {
-; SSE2-LABEL: test_v16i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm2, %xmm0
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm5, %xmm2
-; SSE2-NEXT: pmuludq %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,0,0]
-; SSE2-NEXT: pmuludq %xmm2, %xmm0
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld %xmm3, %xmm1
-; SSE41-NEXT: pmulld %xmm2, %xmm1
-; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmulld %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmulld %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmulld %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.v16i32(<16 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v32i32(<32 x i32> %a0) {
-; SSE2-LABEL: test_v32i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm6[1,1,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm2[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm8, %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,1,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm8, %xmm10
-; SSE2-NEXT: pmuludq %xmm9, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[1,1,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm3[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm8, %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm5[1,1,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm8, %xmm11
-; SSE2-NEXT: pmuludq %xmm9, %xmm11
-; SSE2-NEXT: pmuludq %xmm10, %xmm11
-; SSE2-NEXT: pmuludq %xmm6, %xmm2
-; SSE2-NEXT: pmuludq %xmm4, %xmm0
-; SSE2-NEXT: pmuludq %xmm2, %xmm0
-; SSE2-NEXT: pmuludq %xmm7, %xmm3
-; SSE2-NEXT: pmuludq %xmm5, %xmm1
-; SSE2-NEXT: pmuludq %xmm3, %xmm1
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,2,0,0]
-; SSE2-NEXT: pmuludq %xmm11, %xmm1
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmulld %xmm6, %xmm2
-; SSE41-NEXT: pmulld %xmm7, %xmm3
-; SSE41-NEXT: pmulld %xmm5, %xmm3
-; SSE41-NEXT: pmulld %xmm1, %xmm3
-; SSE41-NEXT: pmulld %xmm4, %xmm2
-; SSE41-NEXT: pmulld %xmm3, %xmm2
-; SSE41-NEXT: pmulld %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmulld %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpmulld %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmulld %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpmulld %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmulld %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.mul.v32i32(<32 x i32> %a0)
- ret i32 %1
-}
-
-;
-; vXi16
-;
-
-define i16 @test_v2i16(<2 x i16> %a0) {
-; SSE-LABEL: test_v2i16:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.v2i16(<2 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v4i16(<4 x i16> %a0) {
-; SSE-LABEL: test_v4i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pmullw %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v8i16(<8 x i16> %a0) {
-; SSE-LABEL: test_v8i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: pmullw %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v16i16(<16 x i16> %a0) {
-; SSE-LABEL: test_v16i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pmullw %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: pmullw %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v16i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v32i16(<32 x i16> %a0) {
-; SSE-LABEL: test_v32i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pmullw %xmm3, %xmm1
-; SSE-NEXT: pmullw %xmm2, %xmm1
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE-NEXT: pmullw %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pmullw %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v32i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v32i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, %eax
-; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v32i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vmovd %xmm0, %eax
-; AVX512BWVL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v32i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vmovd %xmm0, %eax
-; AVX512DQ-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v32i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vmovd %xmm0, %eax
-; AVX512DQVL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.v32i16(<32 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v64i16(<64 x i16> %a0) {
-; SSE-LABEL: test_v64i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pmullw %xmm6, %xmm2
-; SSE-NEXT: pmullw %xmm7, %xmm3
-; SSE-NEXT: pmullw %xmm5, %xmm3
-; SSE-NEXT: pmullw %xmm1, %xmm3
-; SSE-NEXT: pmullw %xmm4, %xmm2
-; SSE-NEXT: pmullw %xmm3, %xmm2
-; SSE-NEXT: pmullw %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE-NEXT: pmullw %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pmullw %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pmullw %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v64i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpmullw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmullw %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v64i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, %eax
-; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v64i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vmovd %xmm0, %eax
-; AVX512BWVL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v64i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm2, %ymm1
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vmovd %xmm0, %eax
-; AVX512DQ-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v64i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm2, %ymm1
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vmovd %xmm0, %eax
-; AVX512DQVL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.mul.v64i16(<64 x i16> %a0)
- ret i16 %1
-}
-
-;
-; vXi8
-;
-
-define i8 @test_v2i8(<2 x i8> %a0) {
-; SSE2-LABEL: test_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.v2i8(<2 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v4i8(<4 x i8> %a0) {
-; SSE2-LABEL: test_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw %xmm1, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: packuswb %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2],zero,xmm1[6],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero
-; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmullw %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2],zero,xmm0[6],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmullw %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2],zero,xmm0[6],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero
-; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.v4i8(<4 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,2,3,3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,2,3,0]
-; SSE2-NEXT: pmullw %xmm1, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: packuswb %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2],zero,xmm1[6],zero,xmm1[10],zero,xmm1[14],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero,xmm1[u],zero
-; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX-NEXT: vpmullw %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2],zero,xmm0[6],zero,xmm0[10],zero,xmm0[14],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero
-; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512BW-LABEL: test_v8i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmullw %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2],zero,xmm0[6],zero,xmm0[10],zero,xmm0[14],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v8i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512VL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpmullw %xmm0, %xmm1, %xmm0
-; AVX512VL-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3]
-; AVX512VL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2],zero,xmm0[6],zero,xmm0[10],zero,xmm0[14],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero
-; AVX512VL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512VL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v8i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT: vpmullw %xmm0, %xmm1, %xmm0
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2],zero,xmm0[6],zero,xmm0[10],zero,xmm0[14],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero,xmm0[u],zero
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
-; AVX512DQ-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw %xmm1, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,2,3,3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: packuswb %xmm2, %xmm0
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmullw %xmm3, %xmm0
-; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: packuswb %xmm2, %xmm0
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmullw %xmm3, %xmm0
-; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: packuswb %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmullw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmullw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4],zero,xmm0[6],zero,xmm0[4],zero,xmm0[6],zero,xmm0[8],zero,xmm0[10],zero,xmm0[12],zero,xmm0[14],zero
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[1,1,2,3,4,5,6,7]
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v16i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BW-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BW-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v16i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v16i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512DQ-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512DQ-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v16i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512DQVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512DQVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQVL-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512DQVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQVL-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512DQVL-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQVL-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512DQVL-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v32i8(<32 x i8> %a0) {
-; SSE2-LABEL: test_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: pmullw %xmm2, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm3, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw %xmm1, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,2,3,3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: pmullw %xmm2, %xmm3
-; SSE41-NEXT: pshufb {{.*#+}} xmm3 = xmm3[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: pmullw %xmm1, %xmm3
-; SSE41-NEXT: pmullw %xmm0, %xmm3
-; SSE41-NEXT: pand %xmm2, %xmm3
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: packuswb %xmm0, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmullw %xmm1, %xmm3
-; SSE41-NEXT: pand %xmm2, %xmm3
-; SSE41-NEXT: packuswb %xmm0, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; SSE41-NEXT: pmullw %xmm1, %xmm3
-; SSE41-NEXT: pand %xmm2, %xmm3
-; SSE41-NEXT: packuswb %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pmullw %xmm3, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmullw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmullw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v32i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BW-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BW-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v32i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpsrld $16, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v32i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQ-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512DQ-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQ-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512DQ-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v32i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512DQVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQVL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512DQVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v64i8(<64 x i8> %a0) {
-; SSE2-LABEL: test_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm5, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-NEXT: pmullw %xmm3, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: pmullw %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: pmullw %xmm4, %xmm0
-; SSE2-NEXT: pmullw %xmm5, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,2,3,3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE41-NEXT: pmullw %xmm3, %xmm1
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE41-NEXT: pmullw %xmm1, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: pmullw %xmm3, %xmm6
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb %xmm3, %xmm6
-; SSE41-NEXT: pmullw %xmm4, %xmm5
-; SSE41-NEXT: pshufb %xmm3, %xmm5
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; SSE41-NEXT: pmullw %xmm4, %xmm5
-; SSE41-NEXT: pshufb %xmm3, %xmm5
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; SSE41-NEXT: pmullw %xmm2, %xmm3
-; SSE41-NEXT: pmullw %xmm0, %xmm3
-; SSE41-NEXT: pand %xmm1, %xmm3
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: packuswb %xmm0, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmullw %xmm2, %xmm3
-; SSE41-NEXT: pand %xmm1, %xmm3
-; SSE41-NEXT: packuswb %xmm0, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; SSE41-NEXT: pmullw %xmm2, %xmm3
-; SSE41-NEXT: pand %xmm1, %xmm3
-; SSE41-NEXT: packuswb %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pmullw %xmm3, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; AVX1-NEXT: vpmullw %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vpmullw %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpmullw %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX1-NEXT: vpmullw %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmullw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmullw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v64i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0
-; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512BW-NEXT: vpmullw %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512BW-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512BW-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512BW-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512BW-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512BW-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512BW-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512BW-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512BW-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v64i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BWVL-NEXT: vpmullw %zmm3, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BWVL-NEXT: vpandq %zmm3, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512BWVL-NEXT: vpmullw %xmm3, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512BWVL-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512BWVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512BWVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512BWVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512BWVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512BWVL-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v64i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512DQ-NEXT: vpmullw %ymm2, %ymm3, %ymm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQ-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512DQ-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQ-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512DQ-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v64i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512DQVL-NEXT: vpmullw %ymm2, %ymm3, %ymm2
-; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQVL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512DQVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQVL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512DQVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.v64i8(<64 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v128i8(<128 x i8> %a0) {
-; SSE2-LABEL: test_v128i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm6, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
-; SSE2-NEXT: pmullw %xmm8, %xmm9
-; SSE2-NEXT: movdqa %xmm4, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15]
-; SSE2-NEXT: pmullw %xmm9, %xmm10
-; SSE2-NEXT: movdqa %xmm0, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm11
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm0[8],xmm11[9],xmm0[9],xmm11[10],xmm0[10],xmm11[11],xmm0[11],xmm11[12],xmm0[12],xmm11[13],xmm0[13],xmm11[14],xmm0[14],xmm11[15],xmm0[15]
-; SSE2-NEXT: pmullw %xmm8, %xmm11
-; SSE2-NEXT: movdqa %xmm5, %xmm12
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm0[8],xmm12[9],xmm0[9],xmm12[10],xmm0[10],xmm12[11],xmm0[11],xmm12[12],xmm0[12],xmm12[13],xmm0[13],xmm12[14],xmm0[14],xmm12[15],xmm0[15]
-; SSE2-NEXT: pmullw %xmm11, %xmm12
-; SSE2-NEXT: movdqa %xmm1, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
-; SSE2-NEXT: pmullw %xmm12, %xmm8
-; SSE2-NEXT: pmullw %xmm10, %xmm8
-; SSE2-NEXT: pmullw %xmm9, %xmm8
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm6, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm2, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm7, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm3, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm5, %xmm1
-; SSE2-NEXT: pmullw %xmm4, %xmm1
-; SSE2-NEXT: pmullw %xmm8, %xmm1
-; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,2,3,3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v128i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm8 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm9 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE41-NEXT: pmullw %xmm6, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm10 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE41-NEXT: pmullw %xmm2, %xmm4
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm11 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE41-NEXT: pmullw %xmm7, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; SSE41-NEXT: pmullw %xmm3, %xmm5
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE41-NEXT: pmullw %xmm5, %xmm1
-; SSE41-NEXT: pmullw %xmm4, %xmm1
-; SSE41-NEXT: pmullw %xmm7, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb %xmm5, %xmm3
-; SSE41-NEXT: pmullw %xmm11, %xmm6
-; SSE41-NEXT: pshufb %xmm5, %xmm6
-; SSE41-NEXT: pmullw %xmm10, %xmm2
-; SSE41-NEXT: pshufb %xmm5, %xmm2
-; SSE41-NEXT: pmullw %xmm8, %xmm9
-; SSE41-NEXT: pshufb %xmm5, %xmm9
-; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero,xmm9[4],zero,xmm9[5],zero,xmm9[6],zero,xmm9[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: pmullw %xmm7, %xmm2
-; SSE41-NEXT: pshufb %xmm5, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: pmullw %xmm6, %xmm3
-; SSE41-NEXT: pshufb %xmm5, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: pmullw %xmm3, %xmm2
-; SSE41-NEXT: pshufb %xmm5, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: pmullw %xmm1, %xmm2
-; SSE41-NEXT: pmullw %xmm0, %xmm2
-; SSE41-NEXT: pand %xmm4, %xmm2
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: packuswb %xmm0, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmullw %xmm1, %xmm2
-; SSE41-NEXT: pand %xmm4, %xmm2
-; SSE41-NEXT: packuswb %xmm0, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; SSE41-NEXT: pmullw %xmm1, %xmm2
-; SSE41-NEXT: pand %xmm4, %xmm2
-; SSE41-NEXT: packuswb %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pmullw %xmm2, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v128i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm8
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm0[8],xmm11[9],xmm0[9],xmm11[10],xmm0[10],xmm11[11],xmm0[11],xmm11[12],xmm0[12],xmm11[13],xmm0[13],xmm11[14],xmm0[14],xmm11[15],xmm0[15]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; AVX1-NEXT: vpmullw %xmm7, %xmm5, %xmm10
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
-; AVX1-NEXT: vpmullw %xmm10, %xmm5, %xmm10
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; AVX1-NEXT: vpmullw %xmm10, %xmm6, %xmm6
-; AVX1-NEXT: vpmullw %xmm6, %xmm8, %xmm6
-; AVX1-NEXT: vpmullw %xmm6, %xmm9, %xmm6
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-NEXT: vpmullw %xmm7, %xmm5, %xmm5
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm7 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero,xmm11[4],zero,xmm11[5],zero,xmm11[6],zero,xmm11[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX1-NEXT: vpmullw %xmm7, %xmm4, %xmm4
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm3
-; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm4
-; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3
-; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmullw %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmullw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmullw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v128i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX2-NEXT: vpmullw %ymm4, %ymm5, %ymm4
-; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
-; AVX2-NEXT: vpmullw %ymm4, %ymm5, %ymm4
-; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX2-NEXT: vpmullw %ymm4, %ymm5, %ymm4
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
-; AVX2-NEXT: vpmullw %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm1, %ymm4, %ymm1
-; AVX2-NEXT: vpackuswb %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v128i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm4
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm1
-; AVX512BW-NEXT: vpackuswb %zmm4, %zmm1, %zmm1
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
-; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0
-; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512BW-NEXT: vpmullw %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512BW-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512BW-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512BW-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512BW-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512BW-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512BW-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512BW-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512BW-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: test_v128i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm3, %zmm2
-; AVX512BWVL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BWVL-NEXT: vpandq %zmm3, %zmm2, %zmm4
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm1
-; AVX512BWVL-NEXT: vpackuswb %zmm4, %zmm1, %zmm1
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
-; AVX512BWVL-NEXT: vpmullw %zmm4, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vpandq %zmm3, %zmm2, %zmm2
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
-; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512BWVL-NEXT: vpmullw %xmm3, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512BWVL-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512BWVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512BWVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512BWVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512BWVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512BWVL-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; AVX512DQ-LABEL: test_v128i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512DQ-NEXT: vpmullw %ymm4, %ymm5, %ymm4
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
-; AVX512DQ-NEXT: vpmullw %ymm4, %ymm5, %ymm4
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512DQ-NEXT: vpmullw %ymm4, %ymm5, %ymm4
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX512DQ-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm2, %ymm1
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpand %ymm1, %ymm4, %ymm1
-; AVX512DQ-NEXT: vpackuswb %ymm0, %ymm1, %ymm0
-; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQ-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512DQ-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQ-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512DQ-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512DQ-NEXT: # kill: def $al killed $al killed $eax
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512DQVL-LABEL: test_v128i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512DQVL-NEXT: vpmullw %ymm4, %ymm5, %ymm4
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
-; AVX512DQVL-NEXT: vpmullw %ymm4, %ymm5, %ymm4
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512DQVL-NEXT: vpmullw %ymm4, %ymm5, %ymm4
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
-; AVX512DQVL-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm2, %ymm1
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512DQVL-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQVL-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpand %ymm1, %ymm4, %ymm1
-; AVX512DQVL-NEXT: vpackuswb %ymm0, %ymm1, %ymm0
-; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm3, %xmm2
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; AVX512DQVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQVL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX512DQVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512DQVL-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpand %xmm1, %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512DQVL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.mul.v128i8(<128 x i8> %a0)
- ret i8 %1
-}
-
-declare i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.mul.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.mul.v16i64(<16 x i64>)
-
-declare i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.v32i32(<32 x i32>)
-
-declare i16 @llvm.experimental.vector.reduce.mul.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.v64i16(<64 x i16>)
-
-declare i8 @llvm.experimental.vector.reduce.mul.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.v128i8(<128 x i8>)

Removed: llvm/trunk/test/CodeGen/X86/vector-reduce-or-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-or-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-or-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-or-widen.ll (removed)
@@ -1,1168 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512
-
-;
-; vXi64
-;
-
-define i64 @test_v2i64(<2 x i64> %a0) {
-; SSE-LABEL: test_v2i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.or.v2i64(<2 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v4i64(<4 x i64> %a0) {
-; SSE-LABEL: test_v4i64:
-; SSE: # %bb.0:
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v4i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v8i64(<8 x i64> %a0) {
-; SSE-LABEL: test_v8i64:
-; SSE: # %bb.0:
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v8i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.or.v8i64(<8 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v16i64(<16 x i64> %a0) {
-; SSE-LABEL: test_v16i64:
-; SSE: # %bb.0:
-; SSE-NEXT: por %xmm6, %xmm2
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: por %xmm5, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: por %xmm4, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v16i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.or.v16i64(<16 x i64> %a0)
- ret i64 %1
-}
-
-;
-; vXi32
-;
-
-define i32 @test_v2i32(<2 x i32> %a0) {
-; SSE-LABEL: test_v2i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.or.v2i32(<2 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v4i32(<4 x i32> %a0) {
-; SSE-LABEL: test_v4i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v8i32(<8 x i32> %a0) {
-; SSE-LABEL: test_v8i32:
-; SSE: # %bb.0:
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v8i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.or.v8i32(<8 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v16i32(<16 x i32> %a0) {
-; SSE-LABEL: test_v16i32:
-; SSE: # %bb.0:
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v16i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.or.v16i32(<16 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v32i32(<32 x i32> %a0) {
-; SSE-LABEL: test_v32i32:
-; SSE: # %bb.0:
-; SSE-NEXT: por %xmm6, %xmm2
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: por %xmm5, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: por %xmm4, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v32i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.or.v32i32(<32 x i32> %a0)
- ret i32 %1
-}
-
-;
-; vXi16
-;
-
-define i16 @test_v2i16(<2 x i16> %a0) {
-; SSE-LABEL: test_v2i16:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.or.v2i16(<2 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v4i16(<4 x i16> %a0) {
-; SSE-LABEL: test_v4i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.or.v4i16(<4 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v8i16(<8 x i16> %a0) {
-; SSE-LABEL: test_v8i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.or.v8i16(<8 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v16i16(<16 x i16> %a0) {
-; SSE-LABEL: test_v16i16:
-; SSE: # %bb.0:
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v16i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.or.v16i16(<16 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v32i16(<32 x i16> %a0) {
-; SSE-LABEL: test_v32i16:
-; SSE: # %bb.0:
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: por %xmm2, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v32i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.or.v32i16(<32 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v64i16(<64 x i16> %a0) {
-; SSE-LABEL: test_v64i16:
-; SSE: # %bb.0:
-; SSE-NEXT: por %xmm6, %xmm2
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: por %xmm5, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
-; SSE-NEXT: por %xmm4, %xmm2
-; SSE-NEXT: por %xmm3, %xmm2
-; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v64i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.or.v64i16(<64 x i16> %a0)
- ret i16 %1
-}
-
-;
-; vXi8
-;
-
-define i8 @test_v2i8(<2 x i8> %a0) {
-; SSE2-LABEL: test_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.or.v2i8(<2 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v4i8(<4 x i8> %a0) {
-; SSE2-LABEL: test_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.or.v4i8(<4 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.or.v8i8(<8 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.or.v16i8(<16 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v32i8(<32 x i8> %a0) {
-; SSE2-LABEL: test_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.or.v32i8(<32 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v64i8(<64 x i8> %a0) {
-; SSE2-LABEL: test_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: por %xmm3, %xmm1
-; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.or.v64i8(<64 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v128i8(<128 x i8> %a0) {
-; SSE2-LABEL: test_v128i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: por %xmm7, %xmm3
-; SSE2-NEXT: por %xmm5, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v128i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: por %xmm6, %xmm2
-; SSE41-NEXT: por %xmm7, %xmm3
-; SSE41-NEXT: por %xmm5, %xmm3
-; SSE41-NEXT: por %xmm1, %xmm3
-; SSE41-NEXT: por %xmm4, %xmm2
-; SSE41-NEXT: por %xmm3, %xmm2
-; SSE41-NEXT: por %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: por %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v128i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v128i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v128i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.or.v128i8(<128 x i8> %a0)
- ret i8 %1
-}
-
-declare i64 @llvm.experimental.vector.reduce.or.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.or.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.or.v16i64(<16 x i64>)
-
-declare i32 @llvm.experimental.vector.reduce.or.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.or.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.or.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.or.v32i32(<32 x i32>)
-
-declare i16 @llvm.experimental.vector.reduce.or.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.or.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.or.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.or.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.or.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.or.v64i16(<64 x i16>)
-
-declare i8 @llvm.experimental.vector.reduce.or.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.or.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.or.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.or.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.or.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.or.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.or.v128i8(<128 x i8>)

Removed: llvm/trunk/test/CodeGen/X86/vector-reduce-smax-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-smax-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-smax-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-smax-widen.ll (removed)
@@ -1,2001 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
-
-;
-; vXi64
-;
-
-define i64 @test_v2i64(<2 x i64> %a0) {
-; SSE2-LABEL: test_v2i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pxor %xmm3, %xmm4
-; SSE41-NEXT: pxor %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,2,2]
-; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movq %xmm2, %rax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: retq
-;
-; AVX512BW-LABEL: test_v2i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v2i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.smax.v2i64(<2 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v4i64(<4 x i64> %a0) {
-; SSE2-LABEL: test_v4i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: pxor %xmm3, %xmm4
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: pxor %xmm3, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm4, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: pxor %xmm3, %xmm4
-; SSE41-NEXT: pxor %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm5
-; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
-; SSE41-NEXT: pand %xmm5, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movq %xmm2, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v4i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v4i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.smax.v4i64(<4 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v8i64(<8 x i64> %a0) {
-; SSE2-LABEL: test_v8i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: pxor %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: pxor %xmm4, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
-; SSE2-NEXT: por %xmm6, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: por %xmm0, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm7, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm5
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm2, %xmm6
-; SSE41-NEXT: pxor %xmm5, %xmm6
-; SSE41-NEXT: movdqa %xmm0, %xmm7
-; SSE41-NEXT: pxor %xmm5, %xmm7
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm6, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm6, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,0,2,2]
-; SSE41-NEXT: pand %xmm6, %xmm0
-; SSE41-NEXT: por %xmm7, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: pxor %xmm5, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
-; SSE41-NEXT: pand %xmm6, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
-; SSE41-NEXT: movapd %xmm3, %xmm0
-; SSE41-NEXT: xorpd %xmm5, %xmm0
-; SSE41-NEXT: movapd %xmm2, %xmm1
-; SSE41-NEXT: xorpd %xmm5, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: pxor %xmm5, %xmm2
-; SSE41-NEXT: pxor %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm5, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm5, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm5
-; AVX1-NEXT: vblendvpd %xmm5, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm1
-; AVX1-NEXT: vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v8i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v8i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.smax.v8i64(<8 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v16i64(<16 x i64> %a0) {
-; SSE2-LABEL: test_v16i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm5, %xmm9
-; SSE2-NEXT: pxor %xmm8, %xmm9
-; SSE2-NEXT: movdqa %xmm1, %xmm10
-; SSE2-NEXT: pxor %xmm8, %xmm10
-; SSE2-NEXT: movdqa %xmm10, %xmm11
-; SSE2-NEXT: pcmpgtd %xmm9, %xmm11
-; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm9, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
-; SSE2-NEXT: pand %xmm12, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm11[1,1,3,3]
-; SSE2-NEXT: por %xmm10, %xmm9
-; SSE2-NEXT: pand %xmm9, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm9
-; SSE2-NEXT: por %xmm1, %xmm9
-; SSE2-NEXT: movdqa %xmm7, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pxor %xmm8, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm10
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE2-NEXT: pand %xmm11, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm10[1,1,3,3]
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: pxor %xmm8, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE2-NEXT: pand %xmm10, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm7[1,1,3,3]
-; SSE2-NEXT: por %xmm5, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm6, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm7, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm4
-; SSE2-NEXT: por %xmm2, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pandn %xmm4, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm9, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm9
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm9, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm8
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm8
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,3,3]
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm8
-; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm10
-; SSE41-NEXT: pxor %xmm9, %xmm10
-; SSE41-NEXT: movdqa %xmm10, %xmm11
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm10
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm10[0,0,2,2]
-; SSE41-NEXT: pand %xmm11, %xmm0
-; SSE41-NEXT: por %xmm10, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm1
-; SSE41-NEXT: pxor %xmm9, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm10
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm10, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm7
-; SSE41-NEXT: movdqa %xmm4, %xmm1
-; SSE41-NEXT: pxor %xmm9, %xmm1
-; SSE41-NEXT: movdqa %xmm8, %xmm3
-; SSE41-NEXT: pxor %xmm9, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,0,2,2]
-; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
-; SSE41-NEXT: movdqa %xmm6, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: pxor %xmm9, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm6
-; SSE41-NEXT: movapd %xmm6, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm4, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm6
-; SSE41-NEXT: movapd %xmm7, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm5, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm7
-; SSE41-NEXT: movapd %xmm7, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm6, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm7, %xmm2
-; SSE41-NEXT: pxor %xmm9, %xmm2
-; SSE41-NEXT: pxor %xmm1, %xmm9
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm9, %xmm3
-; SSE41-NEXT: pcmpgtd %xmm9, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm11
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm11, %xmm5, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm9
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm10
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vblendvpd %xmm4, %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vblendvpd %xmm10, %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vblendvpd %xmm9, %xmm6, %xmm7, %xmm3
-; AVX1-NEXT: vblendvpd %xmm8, %xmm5, %xmm11, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm5
-; AVX1-NEXT: vblendvpd %xmm5, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm1
-; AVX1-NEXT: vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
-; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v16i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v16i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.smax.v16i64(<16 x i64> %a0)
- ret i64 %1
-}
-
-;
-; vXi32
-;
-
-define i32 @test_v2i32(<2 x i32> %a0) {
-; SSE2-LABEL: test_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmaxsd %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smax.v2i32(<2 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v4i32(<4 x i32> %a0) {
-; SSE2-LABEL: test_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmaxsd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pmaxsd %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %a0)
- ret i32 %1
-}
-
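The SSE41 output above is the usual log2 shuffle reduction: each step folds the upper half of the live lanes onto the lower half with pshufd and combines them with pmaxsd. A hedged C equivalent using the same two shuffle patterns as the checks; the name is illustrative only:

    #include <immintrin.h>
    #include <stdint.h>

    static int32_t smax_v4i32(__m128i v) {
        v = _mm_max_epi32(v, _mm_shuffle_epi32(v, _MM_SHUFFLE(1, 0, 3, 2))); /* [2,3,0,1] */
        v = _mm_max_epi32(v, _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 2, 1, 1))); /* [1,1,2,3] */
        return _mm_cvtsi128_si32(v);  /* lane 0 now holds the maximum */
    }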
-define i32 @test_v8i32(<8 x i32> %a0) {
-; SSE2-LABEL: test_v8i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxsd %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmaxsd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pmaxsd %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smax.v8i32(<8 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v16i32(<16 x i32> %a0) {
-; SSE2-LABEL: test_v16i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm4
-; SSE2-NEXT: por %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pandn %xmm4, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxsd %xmm3, %xmm1
-; SSE41-NEXT: pmaxsd %xmm2, %xmm1
-; SSE41-NEXT: pmaxsd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmaxsd %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmaxsd %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmaxsd %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmaxsd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smax.v16i32(<16 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v32i32(<32 x i32> %a0) {
-; SSE2-LABEL: test_v32i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm8
-; SSE2-NEXT: pand %xmm8, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm8
-; SSE2-NEXT: por %xmm2, %xmm8
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pandn %xmm8, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxsd %xmm6, %xmm2
-; SSE41-NEXT: pmaxsd %xmm7, %xmm3
-; SSE41-NEXT: pmaxsd %xmm5, %xmm3
-; SSE41-NEXT: pmaxsd %xmm1, %xmm3
-; SSE41-NEXT: pmaxsd %xmm4, %xmm2
-; SSE41-NEXT: pmaxsd %xmm3, %xmm2
-; SSE41-NEXT: pmaxsd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmaxsd %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmaxsd %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmaxsd %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpmaxsd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpmaxsd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxsd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmaxsd %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smax.v32i32(<32 x i32> %a0)
- ret i32 %1
-}
-
-;
-; vXi16
-;
-
-define i16 @test_v2i16(<2 x i16> %a0) {
-; SSE-LABEL: test_v2i16:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pmaxsw %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smax.v2i16(<2 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v4i16(<4 x i16> %a0) {
-; SSE-LABEL: test_v4i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pmaxsw %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pmaxsw %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smax.v4i16(<4 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v8i16(<8 x i16> %a0) {
-; SSE2-LABEL: test_v8i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vphminposuw %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %a0)
- ret i16 %1
-}
-
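The SSE41 form of test_v8i16 leans on phminposuw, which only computes an unsigned horizontal minimum. XORing every lane with 0x7FFF reverses signed order into unsigned order (INT16_MAX maps to 0, INT16_MIN to 0xFFFF), so the unsigned min of the mapped lanes is the mapped signed max, and the trailing xorl $32767 undoes the mapping. A sketch under those assumptions:

    #include <immintrin.h>
    #include <stdint.h>

    static int16_t smax_v8i16(__m128i v) {
        const __m128i flip = _mm_set1_epi16(0x7FFF);           /* the {{.*}}(%rip) splat */
        __m128i m = _mm_minpos_epu16(_mm_xor_si128(v, flip));  /* word 0 = unsigned min */
        return (int16_t)(_mm_extract_epi16(m, 0) ^ 0x7FFF);    /* undo the order flip */
    }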
-define i16 @test_v16i16(<16 x i16> %a0) {
-; SSE2-LABEL: test_v16i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxsw %xmm1, %xmm0
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smax.v16i16(<16 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v32i16(<32 x i16> %a0) {
-; SSE2-LABEL: test_v32i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmaxsw %xmm3, %xmm1
-; SSE2-NEXT: pmaxsw %xmm2, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxsw %xmm3, %xmm1
-; SSE41-NEXT: pmaxsw %xmm2, %xmm1
-; SSE41-NEXT: pmaxsw %xmm0, %xmm1
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmaxsw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmaxsw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smax.v32i16(<32 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v64i16(<64 x i16> %a0) {
-; SSE2-LABEL: test_v64i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmaxsw %xmm6, %xmm2
-; SSE2-NEXT: pmaxsw %xmm7, %xmm3
-; SSE2-NEXT: pmaxsw %xmm5, %xmm3
-; SSE2-NEXT: pmaxsw %xmm1, %xmm3
-; SSE2-NEXT: pmaxsw %xmm4, %xmm2
-; SSE2-NEXT: pmaxsw %xmm3, %xmm2
-; SSE2-NEXT: pmaxsw %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: pmaxsw %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxsw %xmm7, %xmm3
-; SSE41-NEXT: pmaxsw %xmm5, %xmm3
-; SSE41-NEXT: pmaxsw %xmm1, %xmm3
-; SSE41-NEXT: pmaxsw %xmm6, %xmm2
-; SSE41-NEXT: pmaxsw %xmm4, %xmm2
-; SSE41-NEXT: pmaxsw %xmm3, %xmm2
-; SSE41-NEXT: pmaxsw %xmm0, %xmm2
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm2
-; SSE41-NEXT: phminposuw %xmm2, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpmaxsw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpmaxsw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpmaxsw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpmaxsw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxsw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmaxsw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxsw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmaxsw %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmaxsw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorl $32767, %eax # imm = 0x7FFF
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smax.v64i16(<64 x i16> %a0)
- ret i16 %1
-}
-
-;
-; vXi8
-;
-
-define i8 @test_v2i8(<2 x i8> %a0) {
-; SSE2-LABEL: test_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pmaxsb %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smax.v2i8(<2 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v4i8(<4 x i8> %a0) {
-; SSE2-LABEL: test_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pmaxsb %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pmaxsb %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smax.v4i8(<4 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmaxsb %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pmaxsb %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pmaxsb %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smax.v8i8(<8 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: xorb $127, %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vphminposuw %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: xorb $127, %al
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v16i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: xorb $127, %al
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %a0)
- ret i8 %1
-}
-
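test_v16i8 uses the same order-reversing trick with 0x7F, but there is no byte-wide phminposuw. psrlw $8 moves each word's high byte into the low position and zeroes the high byte, so the following pminub leaves every word holding the smaller of its two bytes; phminposuw then finds the minimum word, whose low byte is the overall minimum, and xorb $127 recovers the signed max. A sketch, with an illustrative name:

    #include <immintrin.h>
    #include <stdint.h>

    static int8_t smax_v16i8(__m128i v) {
        __m128i x = _mm_xor_si128(v, _mm_set1_epi8(0x7F));  /* reverse signed order */
        x = _mm_min_epu8(x, _mm_srli_epi16(x, 8));          /* per-word byte minimum */
        x = _mm_minpos_epu16(x);                            /* word 0 = min word = min byte */
        return (int8_t)(_mm_extract_epi8(x, 0) ^ 0x7F);     /* undo the order flip */
    }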
-define i8 @test_v32i8(<32 x i8> %a0) {
-; SSE2-LABEL: test_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxsb %xmm1, %xmm0
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: xorb $127, %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: xorb $127, %al
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: xorb $127, %al
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: xorb $127, %al
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smax.v32i8(<32 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v64i8(<64 x i8> %a0) {
-; SSE2-LABEL: test_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm4
-; SSE2-NEXT: por %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pandn %xmm4, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxsb %xmm3, %xmm1
-; SSE41-NEXT: pmaxsb %xmm2, %xmm1
-; SSE41-NEXT: pmaxsb %xmm0, %xmm1
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminub %xmm1, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: xorb $127, %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmaxsb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmaxsb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: xorb $127, %al
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: xorb $127, %al
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: xorb $127, %al
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smax.v64i8(<64 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v128i8(<128 x i8> %a0) {
-; SSE2-LABEL: test_v128i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: pcmpgtb %xmm6, %xmm8
-; SSE2-NEXT: pand %xmm8, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm8
-; SSE2-NEXT: por %xmm2, %xmm8
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pcmpgtb %xmm7, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm5, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pcmpgtb %xmm8, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pandn %xmm8, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v128i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxsb %xmm7, %xmm3
-; SSE41-NEXT: pmaxsb %xmm5, %xmm3
-; SSE41-NEXT: pmaxsb %xmm1, %xmm3
-; SSE41-NEXT: pmaxsb %xmm6, %xmm2
-; SSE41-NEXT: pmaxsb %xmm4, %xmm2
-; SSE41-NEXT: pmaxsb %xmm3, %xmm2
-; SSE41-NEXT: pmaxsb %xmm0, %xmm2
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminub %xmm2, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: xorb $127, %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v128i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpmaxsb %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpmaxsb %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpmaxsb %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpmaxsb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxsb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmaxsb %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: xorb $127, %al
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v128i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxsb %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmaxsb %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: xorb $127, %al
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v128i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmaxsb %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: xorb $127, %al
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smax.v128i8(<128 x i8> %a0)
- ret i8 %1
-}
-
-declare i64 @llvm.experimental.vector.reduce.smax.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.smax.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.smax.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.smax.v16i64(<16 x i64>)
-
-declare i32 @llvm.experimental.vector.reduce.smax.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.smax.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.smax.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.smax.v32i32(<32 x i32>)
-
-declare i16 @llvm.experimental.vector.reduce.smax.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smax.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smax.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smax.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smax.v64i16(<64 x i16>)
-
-declare i8 @llvm.experimental.vector.reduce.smax.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smax.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smax.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smax.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smax.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smax.v128i8(<128 x i8>)
Removed: llvm/trunk/test/CodeGen/X86/vector-reduce-smin-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-smin-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-smin-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-smin-widen.ll (removed)
@@ -1,1999 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
-
-;
-; vXi64
-;
-
-define i64 @test_v2i64(<2 x i64> %a0) {
-; SSE2-LABEL: test_v2i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movq %xmm2, %rax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: retq
-;
-; AVX512BW-LABEL: test_v2i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v2i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.smin.v2i64(<2 x i64> %a0)
- ret i64 %1
-}
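-; SSE2 has no 64-bit compare, so the signed greater-than above is built from
-; dword pieces: both operands are XORed with the [2147483648,2147483648]
-; splat (0x0000000080000000 per element, biasing only the low dwords so
-; PCMPGTD orders them as unsigned), the 64-bit mask hi_gt | (hi_eq & lo_gt)
-; is assembled with PCMPGTD/PCMPEQD and PSHUFDs, and PAND/PANDN/POR perform
-; the select. AVX gets a real VPCMPGTQ plus VBLENDVPD, and AVX512 folds the
-; whole thing into VPMINSQ. Roughly the shuffle-based expansion in IR terms
-; (illustrative only, not taken from this file):
-;   %s = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
-;   %c = icmp slt <2 x i64> %a0, %s
-;   %m = select <2 x i1> %c, <2 x i64> %a0, <2 x i64> %s
-;   %r = extractelement <2 x i64> %m, i32 0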
-
-define i64 @test_v4i64(<4 x i64> %a0) {
-; SSE2-LABEL: test_v4i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: pxor %xmm3, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
-; SSE41-NEXT: pand %xmm5, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movq %xmm2, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v4i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v4i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.smin.v4i64(<4 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v8i64(<8 x i64> %a0) {
-; SSE2-LABEL: test_v8i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: pxor %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pxor %xmm4, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
-; SSE2-NEXT: por %xmm6, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm5
-; SSE2-NEXT: por %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm7, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm5, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm6
-; SSE41-NEXT: pxor %xmm5, %xmm6
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
-; SSE41-NEXT: pand %xmm7, %xmm0
-; SSE41-NEXT: por %xmm6, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: pxor %xmm5, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm6, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movapd %xmm2, %xmm0
-; SSE41-NEXT: xorpd %xmm5, %xmm0
-; SSE41-NEXT: movapd %xmm3, %xmm1
-; SSE41-NEXT: xorpd %xmm5, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm5
-; AVX1-NEXT: vblendvpd %xmm5, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm4, %xmm2, %xmm3, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v8i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v8i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.smin.v8i64(<8 x i64> %a0)
- ret i64 %1
-}
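-; The reduction is a log2 tree: min the 256-bit halves, then the 128-bit
-; halves, then the last two qwords. The 128/256-bit VPMINSQ encodings
-; require AVX512VL, which is why the AVX512BW path keeps every step in zmm
-; registers (ending with VZEROUPPER) while the AVX512VL path narrows to
-; xmm VPMINSQ for the 128-bit steps.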
-
-define i64 @test_v16i64(<16 x i64> %a0) {
-; SSE2-LABEL: test_v16i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: pxor %xmm8, %xmm9
-; SSE2-NEXT: movdqa %xmm6, %xmm10
-; SSE2-NEXT: pxor %xmm8, %xmm10
-; SSE2-NEXT: movdqa %xmm10, %xmm11
-; SSE2-NEXT: pcmpgtd %xmm9, %xmm11
-; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm9, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
-; SSE2-NEXT: pand %xmm12, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm11[1,1,3,3]
-; SSE2-NEXT: por %xmm10, %xmm9
-; SSE2-NEXT: pand %xmm9, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm9
-; SSE2-NEXT: por %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: pxor %xmm8, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm10
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2-NEXT: pand %xmm11, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm10[1,1,3,3]
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm7, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm10, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm7, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm4
-; SSE2-NEXT: por %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm9, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm9, %xmm3
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm8
-; SSE2-NEXT: movdqa %xmm8, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm8
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,3,3]
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm8
-; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm10
-; SSE41-NEXT: pxor %xmm9, %xmm10
-; SSE41-NEXT: movdqa %xmm10, %xmm11
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm10
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm10[0,0,2,2]
-; SSE41-NEXT: pand %xmm11, %xmm0
-; SSE41-NEXT: por %xmm10, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm6
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm4, %xmm2
-; SSE41-NEXT: pxor %xmm9, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm10
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm10, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm7, %xmm2
-; SSE41-NEXT: pxor %xmm9, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm8, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm7
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm5, %xmm2
-; SSE41-NEXT: pxor %xmm9, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
-; SSE41-NEXT: movapd %xmm5, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm7, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm7
-; SSE41-NEXT: movapd %xmm4, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm6, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm6
-; SSE41-NEXT: movapd %xmm6, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm7, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm9
-; SSE41-NEXT: movdqa %xmm9, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm9, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm8
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm9
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm11
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm11, %xmm7, %xmm10
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm4, %xmm6
-; AVX1-NEXT: vblendvpd %xmm6, %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vblendvpd %xmm10, %xmm11, %xmm7, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm6
-; AVX1-NEXT: vblendvpd %xmm9, %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vblendvpd %xmm8, %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vblendvpd %xmm6, %xmm4, %xmm5, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm4
-; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpcmpgtq %ymm1, %ymm3, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v16i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v16i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.smin.v16i64(<16 x i64> %a0)
- ret i64 %1
-}
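-; At 16 elements the same shape scales up: AVX2 runs the ymm-level steps as
-; 256-bit VPCMPGTQ/VBLENDVPD pairs before narrowing to xmm, and AVX512
-; starts with a single 512-bit VPMINSQ of the two input zmm registers.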
-
-;
-; vXi32
-;
-
-define i32 @test_v2i32(<2 x i32> %a0) {
-; SSE2-LABEL: test_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pminsd %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smin.v2i32(<2 x i32> %a0)
- ret i32 %1
-}
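-; PMINSD is an SSE4.1 instruction, so the SSE2 path builds the signed min
-; with the classic compare/select idiom: PCMPGTD yields an all-ones mask
-; where the shuffled copy is greater, and PAND/PANDN/POR pick the smaller
-; lane. From SSE4.1 onward a single PMINSD per step suffices.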
-
-define i32 @test_v4i32(<4 x i32> %a0) {
-; SSE2-LABEL: test_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pminsd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pminsd %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v8i32(<8 x i32> %a0) {
-; SSE2-LABEL: test_v8i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminsd %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pminsd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pminsd %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smin.v8i32(<8 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v16i32(<16 x i32> %a0) {
-; SSE2-LABEL: test_v16i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminsd %xmm3, %xmm1
-; SSE41-NEXT: pminsd %xmm2, %xmm1
-; SSE41-NEXT: pminsd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pminsd %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pminsd %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpminsd %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpminsd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminsd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smin.v16i32(<16 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v32i32(<32 x i32> %a0) {
-; SSE2-LABEL: test_v32i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm5, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm8
-; SSE2-NEXT: pand %xmm8, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm8
-; SSE2-NEXT: por %xmm1, %xmm8
-; SSE2-NEXT: movdqa %xmm7, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm6, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm8
-; SSE2-NEXT: pandn %xmm1, %xmm0
-; SSE2-NEXT: por %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminsd %xmm6, %xmm2
-; SSE41-NEXT: pminsd %xmm7, %xmm3
-; SSE41-NEXT: pminsd %xmm5, %xmm3
-; SSE41-NEXT: pminsd %xmm1, %xmm3
-; SSE41-NEXT: pminsd %xmm4, %xmm2
-; SSE41-NEXT: pminsd %xmm3, %xmm2
-; SSE41-NEXT: pminsd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pminsd %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pminsd %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpminsd %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpminsd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpminsd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpminsd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpminsd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpminsd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminsd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpminsd %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpminsd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpminsd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminsd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminsd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.smin.v32i32(<32 x i32> %a0)
- ret i32 %1
-}
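-; The eight input vectors are folded down with seven PMINSDs on SSE4.1
-; before the two horizontal shuffle steps; SSE2 needs a PCMPGTD-based
-; compare/select sequence per fold, so its version is correspondingly
-; longer.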
-
-;
-; vXi16
-;
-
-define i16 @test_v2i16(<2 x i16> %a0) {
-; SSE-LABEL: test_v2i16:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pminsw %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smin.v2i16(<2 x i16> %a0)
- ret i16 %1
-}
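-; Unlike the i32 and i8 cases, PMINSW has existed since SSE2, so one SSE
-; block covers both SSE2 and SSE4.1 here; the PSRLD $16 simply moves
-; element 1 down into the low word position.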
-
-define i16 @test_v4i16(<4 x i16> %a0) {
-; SSE-LABEL: test_v4i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pminsw %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pminsw %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smin.v4i16(<4 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v8i16(<8 x i16> %a0) {
-; SSE2-LABEL: test_v8i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vphminposuw %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %a0)
- ret i16 %1
-}
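-; From SSE4.1 on, the signed word reduction maps onto PHMINPOSUW, which only
-; computes an unsigned horizontal minimum: XORing with the 0x8000 splat
-; translates signed order into unsigned order, and XORL $32768 on the scalar
-; result undoes the bias, so the whole 8-way reduction costs one horizontal
-; instruction plus two XORs.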
-
-define i16 @test_v16i16(<16 x i16> %a0) {
-; SSE2-LABEL: test_v16i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminsw %xmm1, %xmm0
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smin.v16i16(<16 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v32i16(<32 x i16> %a0) {
-; SSE2-LABEL: test_v32i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pminsw %xmm3, %xmm1
-; SSE2-NEXT: pminsw %xmm2, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminsw %xmm3, %xmm1
-; SSE41-NEXT: pminsw %xmm2, %xmm1
-; SSE41-NEXT: pminsw %xmm0, %xmm1
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpminsw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpminsw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminsw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smin.v32i16(<32 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v64i16(<64 x i16> %a0) {
-; SSE2-LABEL: test_v64i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pminsw %xmm6, %xmm2
-; SSE2-NEXT: pminsw %xmm7, %xmm3
-; SSE2-NEXT: pminsw %xmm5, %xmm3
-; SSE2-NEXT: pminsw %xmm1, %xmm3
-; SSE2-NEXT: pminsw %xmm4, %xmm2
-; SSE2-NEXT: pminsw %xmm3, %xmm2
-; SSE2-NEXT: pminsw %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: pminsw %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminsw %xmm7, %xmm3
-; SSE41-NEXT: pminsw %xmm5, %xmm3
-; SSE41-NEXT: pminsw %xmm1, %xmm3
-; SSE41-NEXT: pminsw %xmm6, %xmm2
-; SSE41-NEXT: pminsw %xmm4, %xmm2
-; SSE41-NEXT: pminsw %xmm3, %xmm2
-; SSE41-NEXT: pminsw %xmm0, %xmm2
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm2
-; SSE41-NEXT: phminposuw %xmm2, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpminsw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpminsw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpminsw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpminsw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpminsw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpminsw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminsw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpminsw %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpminsw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpminsw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminsw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminsw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: xorl $32768, %eax # imm = 0x8000
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.smin.v64i16(<64 x i16> %a0)
- ret i16 %1
-}
-
-;
-; vXi8
-;
-
-define i8 @test_v2i8(<2 x i8> %a0) {
-; SSE2-LABEL: test_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminsb %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smin.v2i8(<2 x i8> %a0)
- ret i8 %1
-}
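-; PMINSB only arrives with SSE4.1, so the SSE2 byte paths fall back to the
-; PCMPGTB/PAND/PANDN/POR select idiom at every level; the PSRLW $8 brings
-; the second byte down next to the first.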
-
-define i8 @test_v4i8(<4 x i8> %a0) {
-; SSE2-LABEL: test_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pminsb %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminsb %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smin.v4i8(<4 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pminsb %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pminsb %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminsb %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smin.v8i8(<8 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: xorb $-128, %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vphminposuw %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: xorb $-128, %al
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v16i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: xorb $-128, %al
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %a0)
- ret i8 %1
-}
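-; There is no horizontal byte min, so the SSE4.1 path reuses PHMINPOSUW:
-; after the 0x80 bias, PSRLW $8 zeroes the high byte of every word and
-; PMINUB folds each word's two bytes into its low byte, making the unsigned
-; word minimum equal to the minimum byte; XORB $-128 then removes the bias.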
-
-define i8 @test_v32i8(<32 x i8> %a0) {
-; SSE2-LABEL: test_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminsb %xmm1, %xmm0
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: xorb $-128, %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: xorb $-128, %al
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: xorb $-128, %al
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: xorb $-128, %al
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smin.v32i8(<32 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v64i8(<64 x i8> %a0) {
-; SSE2-LABEL: test_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminsb %xmm3, %xmm1
-; SSE41-NEXT: pminsb %xmm2, %xmm1
-; SSE41-NEXT: pminsb %xmm0, %xmm1
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminub %xmm1, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: xorb $-128, %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpminsb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpminsb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: xorb $-128, %al
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: xorb $-128, %al
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminsb %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: xorb $-128, %al
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smin.v64i8(<64 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v128i8(<128 x i8> %a0) {
-; SSE2-LABEL: test_v128i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm5, %xmm8
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm8
-; SSE2-NEXT: pand %xmm8, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm8
-; SSE2-NEXT: por %xmm1, %xmm8
-; SSE2-NEXT: movdqa %xmm7, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm6, %xmm0
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pcmpgtb %xmm8, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm8
-; SSE2-NEXT: pandn %xmm1, %xmm0
-; SSE2-NEXT: por %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v128i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminsb %xmm7, %xmm3
-; SSE41-NEXT: pminsb %xmm5, %xmm3
-; SSE41-NEXT: pminsb %xmm1, %xmm3
-; SSE41-NEXT: pminsb %xmm6, %xmm2
-; SSE41-NEXT: pminsb %xmm4, %xmm2
-; SSE41-NEXT: pminsb %xmm3, %xmm2
-; SSE41-NEXT: pminsb %xmm0, %xmm2
-; SSE41-NEXT: pxor {{.*}}(%rip), %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminub %xmm2, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: xorb $-128, %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v128i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpminsb %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpminsb %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpminsb %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpminsb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpminsb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpminsb %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: xorb $-128, %al
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v128i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminsb %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpminsb %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: xorb $-128, %al
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v128i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpminsb %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminsb %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminsb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: xorb $-128, %al
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.smin.v128i8(<128 x i8> %a0)
- ret i8 %1
-}
-
-declare i64 @llvm.experimental.vector.reduce.smin.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.smin.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.smin.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.smin.v16i64(<16 x i64>)
-
-declare i32 @llvm.experimental.vector.reduce.smin.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.smin.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.smin.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.smin.v32i32(<32 x i32>)
-
-declare i16 @llvm.experimental.vector.reduce.smin.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smin.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smin.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smin.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.smin.v64i16(<64 x i16>)
-
-declare i8 @llvm.experimental.vector.reduce.smin.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smin.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smin.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smin.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smin.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.smin.v128i8(<128 x i8>)
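A note on the recurring pattern in the byte-reduction hunks above: x86 has no signed byte horizontal minimum, so the lowering biases each lane with pxor (flipping the sign bit), reduces with an unsigned minimum (pminub plus phminposuw), and unbiases the scalar result (xorb $-128). A minimal scalar sketch of that identity in LLVM IR follows; it is illustrative only, with a hypothetical function name, and is not part of the removed test:

  ; smin(a, b) == umin(a ^ 0x80, b ^ 0x80) ^ 0x80, since flipping the
  ; sign bit maps signed order onto unsigned order.
  define i8 @smin_bias_sketch(i8 %a, i8 %b) {
    %ab = xor i8 %a, -128
    %bb = xor i8 %b, -128
    %lt = icmp ult i8 %ab, %bb
    %m = select i1 %lt, i8 %ab, i8 %bb
    %r = xor i8 %m, -128
    ret i8 %r
  }

The vector tests above apply the same identity lane-wise, with phminposuw supplying the horizontal unsigned minimum of the biased lanes.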
Removed: llvm/trunk/test/CodeGen/X86/vector-reduce-umax-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-umax-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-umax-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-umax-widen.ll (removed)
@@ -1,2203 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
-
-;
-; vXi64
-;
-
-define i64 @test_v2i64(<2 x i64> %a0) {
-; SSE2-LABEL: test_v2i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pxor %xmm3, %xmm4
-; SSE41-NEXT: pxor %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,2,2]
-; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movq %xmm2, %rax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm3
-; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm2
-; AVX-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: retq
-;
-; AVX512BW-LABEL: test_v2i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v2i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v4i64(<4 x i64> %a0) {
-; SSE2-LABEL: test_v4i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: pxor %xmm3, %xmm4
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: pxor %xmm3, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm4, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: pxor %xmm3, %xmm4
-; SSE41-NEXT: pxor %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm5
-; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
-; SSE41-NEXT: pand %xmm5, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movq %xmm2, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
-; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorpd %xmm2, %xmm0, %xmm3
-; AVX1-NEXT: vxorpd %xmm2, %xmm1, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
-; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm4
-; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vxorpd %xmm2, %xmm0, %xmm3
-; AVX2-NEXT: vxorpd %xmm2, %xmm1, %xmm2
-; AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v4i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v4i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.umax.v4i64(<4 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v8i64(<8 x i64> %a0) {
-; SSE2-LABEL: test_v8i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: pxor %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: pxor %xmm4, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
-; SSE2-NEXT: por %xmm6, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: por %xmm0, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm7, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm5
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm2, %xmm6
-; SSE41-NEXT: pxor %xmm5, %xmm6
-; SSE41-NEXT: movdqa %xmm0, %xmm7
-; SSE41-NEXT: pxor %xmm5, %xmm7
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm6, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm6, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,0,2,2]
-; SSE41-NEXT: pand %xmm6, %xmm0
-; SSE41-NEXT: por %xmm7, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: pxor %xmm5, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
-; SSE41-NEXT: pand %xmm6, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
-; SSE41-NEXT: movapd %xmm3, %xmm0
-; SSE41-NEXT: xorpd %xmm5, %xmm0
-; SSE41-NEXT: movapd %xmm2, %xmm1
-; SSE41-NEXT: xorpd %xmm5, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: pxor %xmm5, %xmm2
-; SSE41-NEXT: pxor %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm5, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm5, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
-; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpxor %xmm2, %xmm4, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT: vpxor %xmm2, %xmm6, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vblendvpd %xmm5, %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vxorpd %xmm2, %xmm4, %xmm5
-; AVX1-NEXT: vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vxorpd %xmm2, %xmm0, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm1, %xmm0, %xmm4, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorpd %xmm2, %xmm0, %xmm3
-; AVX1-NEXT: vxorpd %xmm2, %xmm1, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
-; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm4
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vxorpd %xmm2, %xmm1, %xmm3
-; AVX2-NEXT: vxorpd %xmm2, %xmm0, %xmm4
-; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vxorpd %xmm2, %xmm0, %xmm3
-; AVX2-NEXT: vxorpd %xmm2, %xmm1, %xmm2
-; AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v8i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v8i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.umax.v8i64(<8 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v16i64(<16 x i64> %a0) {
-; SSE2-LABEL: test_v16i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm5, %xmm9
-; SSE2-NEXT: pxor %xmm8, %xmm9
-; SSE2-NEXT: movdqa %xmm1, %xmm10
-; SSE2-NEXT: pxor %xmm8, %xmm10
-; SSE2-NEXT: movdqa %xmm10, %xmm11
-; SSE2-NEXT: pcmpgtd %xmm9, %xmm11
-; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm9, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
-; SSE2-NEXT: pand %xmm12, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm11[1,1,3,3]
-; SSE2-NEXT: por %xmm10, %xmm9
-; SSE2-NEXT: pand %xmm9, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm9
-; SSE2-NEXT: por %xmm1, %xmm9
-; SSE2-NEXT: movdqa %xmm7, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pxor %xmm8, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm10
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE2-NEXT: pand %xmm11, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm10[1,1,3,3]
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: pxor %xmm8, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
-; SSE2-NEXT: pand %xmm10, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm7[1,1,3,3]
-; SSE2-NEXT: por %xmm5, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm6, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm7, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm4
-; SSE2-NEXT: por %xmm2, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pandn %xmm4, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm9, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm9
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm9, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm8
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm8
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,3,3]
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm8
-; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm10
-; SSE41-NEXT: pxor %xmm9, %xmm10
-; SSE41-NEXT: movdqa %xmm10, %xmm11
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm10
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm10[0,0,2,2]
-; SSE41-NEXT: pand %xmm11, %xmm0
-; SSE41-NEXT: por %xmm10, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm1
-; SSE41-NEXT: pxor %xmm9, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm10
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm10, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm7
-; SSE41-NEXT: movdqa %xmm4, %xmm1
-; SSE41-NEXT: pxor %xmm9, %xmm1
-; SSE41-NEXT: movdqa %xmm8, %xmm3
-; SSE41-NEXT: pxor %xmm9, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,0,2,2]
-; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
-; SSE41-NEXT: movdqa %xmm6, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: pxor %xmm9, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm6
-; SSE41-NEXT: movapd %xmm6, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm4, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm6
-; SSE41-NEXT: movapd %xmm7, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm5, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm7
-; SSE41-NEXT: movapd %xmm7, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm6, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm7, %xmm2
-; SSE41-NEXT: pxor %xmm9, %xmm2
-; SSE41-NEXT: pxor %xmm1, %xmm9
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm9, %xmm3
-; SSE41-NEXT: pcmpgtd %xmm9, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm8
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm4, %xmm8, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm10
-; AVX1-NEXT: vpxor %xmm4, %xmm10, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm9
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
-; AVX1-NEXT: vpxor %xmm4, %xmm11, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpxor %xmm4, %xmm7, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm12
-; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm6
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm13
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm6
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vxorpd %xmm4, %xmm1, %xmm3
-; AVX1-NEXT: vblendvpd %xmm13, %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vxorpd %xmm4, %xmm0, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm12, %xmm7, %xmm11, %xmm3
-; AVX1-NEXT: vxorpd %xmm4, %xmm3, %xmm5
-; AVX1-NEXT: vblendvpd %xmm9, %xmm10, %xmm8, %xmm6
-; AVX1-NEXT: vxorpd %xmm4, %xmm6, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vblendvpd %xmm5, %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vxorpd %xmm4, %xmm3, %xmm5
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vxorpd %xmm4, %xmm0, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorpd %xmm4, %xmm0, %xmm2
-; AVX1-NEXT: vxorpd %xmm4, %xmm1, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm5
-; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm6
-; AVX2-NEXT: vpcmpgtq %ymm5, %ymm6, %ymm5
-; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm2
-; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm5
-; AVX2-NEXT: vpcmpgtq %ymm2, %ymm5, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vxorpd %ymm4, %ymm1, %ymm2
-; AVX2-NEXT: vxorpd %ymm4, %ymm0, %ymm3
-; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vxorpd %xmm4, %xmm1, %xmm2
-; AVX2-NEXT: vxorpd %xmm4, %xmm0, %xmm3
-; AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vxorpd %xmm4, %xmm0, %xmm2
-; AVX2-NEXT: vxorpd %xmm4, %xmm1, %xmm3
-; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v16i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v16i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.umax.v16i64(<16 x i64> %a0)
- ret i64 %1
-}
-
-;
-; vXi32
-;
-
-define i32 @test_v2i32(<2 x i32> %a0) {
-; SSE2-LABEL: test_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movd %xmm3, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmaxud %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umax.v2i32(<2 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v4i32(<4 x i32> %a0) {
-; SSE2-LABEL: test_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmaxud %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pmaxud %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v8i32(<8 x i32> %a0) {
-; SSE2-LABEL: test_v8i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: movd %xmm3, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxud %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmaxud %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pmaxud %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umax.v8i32(<8 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v16i32(<16 x i32> %a0) {
-; SSE2-LABEL: test_v16i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pxor %xmm4, %xmm6
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: pxor %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: por %xmm0, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm5
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxud %xmm3, %xmm1
-; SSE41-NEXT: pmaxud %xmm2, %xmm1
-; SSE41-NEXT: pmaxud %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmaxud %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmaxud %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmaxud %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umax.v16i32(<16 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v32i32(<32 x i32> %a0) {
-; SSE2-LABEL: test_v32i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm5, %xmm10
-; SSE2-NEXT: pxor %xmm8, %xmm10
-; SSE2-NEXT: movdqa %xmm1, %xmm9
-; SSE2-NEXT: pxor %xmm8, %xmm9
-; SSE2-NEXT: pcmpgtd %xmm10, %xmm9
-; SSE2-NEXT: pand %xmm9, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm9
-; SSE2-NEXT: por %xmm1, %xmm9
-; SSE2-NEXT: movdqa %xmm7, %xmm5
-; SSE2-NEXT: pxor %xmm8, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pxor %xmm8, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm6, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm4
-; SSE2-NEXT: por %xmm2, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pandn %xmm4, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm9, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm9
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm9, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxud %xmm6, %xmm2
-; SSE41-NEXT: pmaxud %xmm7, %xmm3
-; SSE41-NEXT: pmaxud %xmm5, %xmm3
-; SSE41-NEXT: pmaxud %xmm1, %xmm3
-; SSE41-NEXT: pmaxud %xmm4, %xmm2
-; SSE41-NEXT: pmaxud %xmm3, %xmm2
-; SSE41-NEXT: pmaxud %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmaxud %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmaxud %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmaxud %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpmaxud %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpmaxud %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmaxud %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpmaxud %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpmaxud %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxud %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmaxud %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmaxud %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umax.v32i32(<32 x i32> %a0)
- ret i32 %1
-}
-
-;
-; vXi16
-;
-
-define i16 @test_v2i16(<2 x i16> %a0) {
-; SSE2-LABEL: test_v2i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pmaxuw %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umax.v2i16(<2 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v4i16(<4 x i16> %a0) {
-; SSE2-LABEL: test_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmaxuw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pmaxuw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umax.v4i16(<4 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v8i16(<8 x i16> %a0) {
-; SSE2-LABEL: test_v8i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: notl %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vphminposuw %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: notl %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512BW-LABEL: test_v8i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, %eax
-; AVX512BW-NEXT: notl %eax
-; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v8i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT: vmovd %xmm0, %eax
-; AVX512VL-NEXT: notl %eax
-; AVX512VL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512VL-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v16i16(<16 x i16> %a0) {
-; SSE2-LABEL: test_v16i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxuw %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: notl %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: notl %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: notl %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v16i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, %eax
-; AVX512BW-NEXT: notl %eax
-; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v16i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT: vmovd %xmm0, %eax
-; AVX512VL-NEXT: notl %eax
-; AVX512VL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umax.v16i16(<16 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v32i16(<32 x i16> %a0) {
-; SSE2-LABEL: test_v32i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pmaxsw %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: pmaxsw %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: pmaxsw %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxuw %xmm3, %xmm1
-; SSE41-NEXT: pmaxuw %xmm2, %xmm1
-; SSE41-NEXT: pmaxuw %xmm0, %xmm1
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: notl %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmaxuw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: notl %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: notl %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v32i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, %eax
-; AVX512BW-NEXT: notl %eax
-; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v32i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT: vmovd %xmm0, %eax
-; AVX512VL-NEXT: notl %eax
-; AVX512VL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umax.v32i16(<32 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v64i16(<64 x i16> %a0) {
-; SSE2-LABEL: test_v64i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm8, %xmm6
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: pmaxsw %xmm6, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: pmaxsw %xmm2, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm7
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: pmaxsw %xmm7, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm5
-; SSE2-NEXT: pmaxsw %xmm3, %xmm5
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pmaxsw %xmm5, %xmm1
-; SSE2-NEXT: pmaxsw %xmm4, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pmaxsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: pmaxsw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxuw %xmm7, %xmm3
-; SSE41-NEXT: pmaxuw %xmm5, %xmm3
-; SSE41-NEXT: pmaxuw %xmm1, %xmm3
-; SSE41-NEXT: pmaxuw %xmm6, %xmm2
-; SSE41-NEXT: pmaxuw %xmm4, %xmm2
-; SSE41-NEXT: pmaxuw %xmm3, %xmm2
-; SSE41-NEXT: pmaxuw %xmm0, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: notl %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpmaxuw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpmaxuw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpmaxuw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpmaxuw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxuw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmaxuw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: notl %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxuw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmaxuw %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: notl %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v64i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmaxuw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, %eax
-; AVX512BW-NEXT: notl %eax
-; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v64i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmaxuw %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT: vmovd %xmm0, %eax
-; AVX512VL-NEXT: notl %eax
-; AVX512VL-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umax.v64i16(<64 x i16> %a0)
- ret i16 %1
-}
-
-;
-; vXi8
-;
-
-define i8 @test_v2i8(<2 x i8> %a0) {
-; SSE2-LABEL: test_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pmaxub %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umax.v2i8(<2 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v4i8(<4 x i8> %a0) {
-; SSE2-LABEL: test_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pmaxub %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pmaxub %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umax.v4i8(<4 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmaxub %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pmaxub %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pmaxub %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umax.v8i8(<8 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminub %xmm1, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: notb %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vphminposuw %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: notb %al
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512BW-LABEL: test_v16i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512BW-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: notb %al
-; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v16i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512VL-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512VL-NEXT: notb %al
-; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512VL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v32i8(<32 x i8> %a0) {
-; SSE2-LABEL: test_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxub %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminub %xmm1, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: notb %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: notb %al
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: notb %al
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v32i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512BW-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: notb %al
-; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v32i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512VL-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512VL-NEXT: notb %al
-; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umax.v32i8(<32 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v64i8(<64 x i8> %a0) {
-; SSE2-LABEL: test_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmaxub %xmm3, %xmm1
-; SSE2-NEXT: pmaxub %xmm2, %xmm1
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxub %xmm3, %xmm1
-; SSE41-NEXT: pmaxub %xmm2, %xmm1
-; SSE41-NEXT: pmaxub %xmm0, %xmm1
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: notb %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmaxub %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: notb %al
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: notb %al
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v64i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512BW-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: notb %al
-; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v64i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512VL-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512VL-NEXT: notb %al
-; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umax.v64i8(<64 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v128i8(<128 x i8> %a0) {
-; SSE2-LABEL: test_v128i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmaxub %xmm6, %xmm2
-; SSE2-NEXT: pmaxub %xmm7, %xmm3
-; SSE2-NEXT: pmaxub %xmm5, %xmm3
-; SSE2-NEXT: pmaxub %xmm1, %xmm3
-; SSE2-NEXT: pmaxub %xmm4, %xmm2
-; SSE2-NEXT: pmaxub %xmm3, %xmm2
-; SSE2-NEXT: pmaxub %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: pmaxub %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pmaxub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmaxub %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v128i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmaxub %xmm7, %xmm3
-; SSE41-NEXT: pmaxub %xmm5, %xmm3
-; SSE41-NEXT: pmaxub %xmm1, %xmm3
-; SSE41-NEXT: pmaxub %xmm6, %xmm2
-; SSE41-NEXT: pmaxub %xmm4, %xmm2
-; SSE41-NEXT: pmaxub %xmm3, %xmm2
-; SSE41-NEXT: pmaxub %xmm0, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: notb %al
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v128i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpmaxub %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpmaxub %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpmaxub %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpmaxub %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxub %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmaxub %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: notb %al
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v128i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmaxub %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmaxub %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: notb %al
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v128i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmaxub %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512BW-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BW-NEXT: notb %al
-; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v128i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmaxub %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512VL-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512VL-NEXT: notb %al
-; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umax.v128i8(<128 x i8> %a0)
- ret i8 %1
-}
-
-declare i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.umax.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.umax.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.umax.v16i64(<16 x i64>)
-
-declare i32 @llvm.experimental.vector.reduce.umax.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umax.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umax.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umax.v32i32(<32 x i32>)
-
-declare i16 @llvm.experimental.vector.reduce.umax.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umax.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umax.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umax.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umax.v64i16(<64 x i16>)
-
-declare i8 @llvm.experimental.vector.reduce.umax.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umax.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umax.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umax.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umax.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umax.v128i8(<128 x i8>)
Removed: llvm/trunk/test/CodeGen/X86/vector-reduce-umin-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-umin-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-umin-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-umin-widen.ll (removed)
@@ -1,2007 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
-
-;
-; vXi64
-;
-
-define i64 @test_v2i64(<2 x i64> %a0) {
-; SSE2-LABEL: test_v2i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movq %xmm2, %rax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm3
-; AVX-NEXT: vpxor %xmm2, %xmm1, %xmm2
-; AVX-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: retq
-;
-; AVX512BW-LABEL: test_v2i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v2i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.umin.v2i64(<2 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v4i64(<4 x i64> %a0) {
-; SSE2-LABEL: test_v4i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: pxor %xmm3, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
-; SSE41-NEXT: pand %xmm5, %xmm0
-; SSE41-NEXT: por %xmm4, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movq %xmm2, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpxor %xmm1, %xmm3, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorpd %xmm1, %xmm0, %xmm3
-; AVX1-NEXT: vxorpd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
-; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm4
-; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vxorpd %xmm2, %xmm0, %xmm3
-; AVX2-NEXT: vxorpd %xmm2, %xmm1, %xmm2
-; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v4i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v4i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.umin.v4i64(<4 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v8i64(<8 x i64> %a0) {
-; SSE2-LABEL: test_v8i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: pxor %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pxor %xmm4, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
-; SSE2-NEXT: por %xmm6, %xmm5
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm5
-; SSE2-NEXT: por %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm7, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm5, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm6
-; SSE41-NEXT: pxor %xmm5, %xmm6
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
-; SSE41-NEXT: pand %xmm7, %xmm0
-; SSE41-NEXT: por %xmm6, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: pxor %xmm5, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm6, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movapd %xmm2, %xmm0
-; SSE41-NEXT: xorpd %xmm5, %xmm0
-; SSE41-NEXT: movapd %xmm3, %xmm1
-; SSE41-NEXT: xorpd %xmm5, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm5, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm6
-; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT: vblendvpd %xmm6, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vxorpd %xmm3, %xmm0, %xmm1
-; AVX1-NEXT: vblendvpd %xmm4, %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vxorpd %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorpd %xmm3, %xmm0, %xmm2
-; AVX1-NEXT: vxorpd %xmm3, %xmm1, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
-; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm4
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vxorpd %xmm2, %xmm0, %xmm3
-; AVX2-NEXT: vxorpd %xmm2, %xmm1, %xmm4
-; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vxorpd %xmm2, %xmm0, %xmm3
-; AVX2-NEXT: vxorpd %xmm2, %xmm1, %xmm2
-; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v8i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v8i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.umin.v8i64(<8 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v16i64(<16 x i64> %a0) {
-; SSE2-LABEL: test_v16i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: pxor %xmm8, %xmm9
-; SSE2-NEXT: movdqa %xmm6, %xmm10
-; SSE2-NEXT: pxor %xmm8, %xmm10
-; SSE2-NEXT: movdqa %xmm10, %xmm11
-; SSE2-NEXT: pcmpgtd %xmm9, %xmm11
-; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm9, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
-; SSE2-NEXT: pand %xmm12, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm11[1,1,3,3]
-; SSE2-NEXT: por %xmm10, %xmm9
-; SSE2-NEXT: pand %xmm9, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm9
-; SSE2-NEXT: por %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: pxor %xmm8, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm10
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2-NEXT: pand %xmm11, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm10[1,1,3,3]
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm7, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm10, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm7, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm4
-; SSE2-NEXT: por %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm9, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm9, %xmm3
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm8
-; SSE2-NEXT: movdqa %xmm8, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm8
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,3,3]
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm8
-; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm10
-; SSE41-NEXT: pxor %xmm9, %xmm10
-; SSE41-NEXT: movdqa %xmm10, %xmm11
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm10
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm10[0,0,2,2]
-; SSE41-NEXT: pand %xmm11, %xmm0
-; SSE41-NEXT: por %xmm10, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm6
-; SSE41-NEXT: movdqa %xmm8, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm4, %xmm2
-; SSE41-NEXT: pxor %xmm9, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm10
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm10, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm7, %xmm2
-; SSE41-NEXT: pxor %xmm9, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm8
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm8
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm8, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm7
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: movdqa %xmm5, %xmm2
-; SSE41-NEXT: pxor %xmm9, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
-; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
-; SSE41-NEXT: movapd %xmm5, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm7, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm7
-; SSE41-NEXT: movapd %xmm4, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm6, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm6
-; SSE41-NEXT: movapd %xmm6, %xmm0
-; SSE41-NEXT: xorpd %xmm9, %xmm0
-; SSE41-NEXT: movapd %xmm7, %xmm1
-; SSE41-NEXT: xorpd %xmm9, %xmm1
-; SSE41-NEXT: movapd %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm1, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
-; SSE41-NEXT: movdqa %xmm7, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm9
-; SSE41-NEXT: movdqa %xmm9, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm9
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,2,2]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm9, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm5
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm8
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm9
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm12
-; AVX1-NEXT: vpxor %xmm4, %xmm12, %xmm10
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm13
-; AVX1-NEXT: vpxor %xmm4, %xmm13, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm10, %xmm5, %xmm10
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpxor %xmm4, %xmm5, %xmm11
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; AVX1-NEXT: vpxor %xmm4, %xmm7, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm11, %xmm6, %xmm6
-; AVX1-NEXT: vblendvpd %xmm6, %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vxorpd %xmm4, %xmm5, %xmm11
-; AVX1-NEXT: vblendvpd %xmm10, %xmm12, %xmm13, %xmm7
-; AVX1-NEXT: vxorpd %xmm4, %xmm7, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm11, %xmm6, %xmm6
-; AVX1-NEXT: vblendvpd %xmm9, %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vxorpd %xmm4, %xmm0, %xmm2
-; AVX1-NEXT: vblendvpd %xmm8, %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vxorpd %xmm4, %xmm1, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vxorpd %xmm4, %xmm0, %xmm1
-; AVX1-NEXT: vblendvpd %xmm6, %xmm5, %xmm7, %xmm2
-; AVX1-NEXT: vxorpd %xmm4, %xmm2, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorpd %xmm4, %xmm0, %xmm2
-; AVX1-NEXT: vxorpd %xmm4, %xmm1, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm5
-; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm6
-; AVX2-NEXT: vpcmpgtq %ymm5, %ymm6, %ymm5
-; AVX2-NEXT: vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm3
-; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm5
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
-; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vxorpd %ymm4, %ymm0, %ymm2
-; AVX2-NEXT: vxorpd %ymm4, %ymm1, %ymm3
-; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vxorpd %xmm4, %xmm0, %xmm2
-; AVX2-NEXT: vxorpd %xmm4, %xmm1, %xmm3
-; AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vxorpd %xmm4, %xmm0, %xmm2
-; AVX2-NEXT: vxorpd %xmm4, %xmm1, %xmm3
-; AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512BW-LABEL: test_v16i64:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512BW-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovq %xmm0, %rax
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512VL-LABEL: test_v16i64:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovq %xmm0, %rax
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.umin.v16i64(<16 x i64> %a0)
- ret i64 %1
-}
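
For reference: SSE and AVX1 have no unsigned 64-bit compare, so every vXi64 umin lowering above flips the sign bit (the 9223372036854775808 splat) and falls back on a signed greater-than. A minimal C sketch of that equivalence, with a helper name of my own choosing, purely as illustration:

#include <stdint.h>

/* Unsigned compare via signed compare after flipping the sign bit:
   biasing both operands by 2^63 maps the unsigned order onto the
   signed order, which is what the pxor + pcmpgtq/pcmpgtd sequences
   above exploit. */
static int ult_via_signed(uint64_t a, uint64_t b) {
    int64_t sa = (int64_t)(a ^ 0x8000000000000000ULL);
    int64_t sb = (int64_t)(b ^ 0x8000000000000000ULL);
    return sa < sb;
}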
-
-;
-; vXi32
-;
-
-define i32 @test_v2i32(<2 x i32> %a0) {
-; SSE2-LABEL: test_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pminud %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umin.v2i32(<2 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v4i32(<4 x i32> %a0) {
-; SSE2-LABEL: test_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pminud %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pminud %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v8i32(<8 x i32> %a0) {
-; SSE2-LABEL: test_v8i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm4, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminud %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pminud %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pminud %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umin.v8i32(<8 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v16i32(<16 x i32> %a0) {
-; SSE2-LABEL: test_v16i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: pxor %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: pxor %xmm4, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
-; SSE2-NEXT: pand %xmm6, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm6
-; SSE2-NEXT: por %xmm1, %xmm6
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pandn %xmm2, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm6, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm6, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: movd %xmm4, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminud %xmm3, %xmm1
-; SSE41-NEXT: pminud %xmm2, %xmm1
-; SSE41-NEXT: pminud %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pminud %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pminud %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpminud %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umin.v16i32(<16 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v32i32(<32 x i32> %a0) {
-; SSE2-LABEL: test_v32i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm2, %xmm10
-; SSE2-NEXT: pxor %xmm8, %xmm10
-; SSE2-NEXT: movdqa %xmm6, %xmm9
-; SSE2-NEXT: pxor %xmm8, %xmm9
-; SSE2-NEXT: pcmpgtd %xmm10, %xmm9
-; SSE2-NEXT: pand %xmm9, %xmm2
-; SSE2-NEXT: pandn %xmm6, %xmm9
-; SSE2-NEXT: por %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: pxor %xmm8, %xmm6
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm4, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm7, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: pandn %xmm7, %xmm4
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm5, %xmm3
-; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: pandn %xmm4, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm9, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm9, %xmm3
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: por %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm8
-; SSE2-NEXT: pand %xmm8, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm8
-; SSE2-NEXT: por %xmm3, %xmm8
-; SSE2-NEXT: movd %xmm8, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminud %xmm6, %xmm2
-; SSE41-NEXT: pminud %xmm7, %xmm3
-; SSE41-NEXT: pminud %xmm5, %xmm3
-; SSE41-NEXT: pminud %xmm1, %xmm3
-; SSE41-NEXT: pminud %xmm4, %xmm2
-; SSE41-NEXT: pminud %xmm3, %xmm2
-; SSE41-NEXT: pminud %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pminud %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pminud %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpminud %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpminud %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpminud %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpminud %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpminud %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpminud %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminud %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpminud %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminud %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.umin.v32i32(<32 x i32> %a0)
- ret i32 %1
-}
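
The vXi32 tests pin down the same fold with pminud doing the per-step work; the semantics being checked is a plain unsigned-minimum reduction. A scalar C model, sketch only (the function name is hypothetical):

#include <stddef.h>
#include <stdint.h>

/* Scalar model of llvm.experimental.vector.reduce.umin on i32:
   fold every lane into a running unsigned minimum. */
static uint32_t umin_reduce_u32(const uint32_t *v, size_t n) {
    uint32_t m = v[0];
    for (size_t i = 1; i < n; i++)
        if (v[i] < m)
            m = v[i];
    return m;
}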
-
-;
-; vXi16
-;
-
-define i16 @test_v2i16(<2 x i16> %a0) {
-; SSE2-LABEL: test_v2i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pminuw %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umin.v2i16(<2 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v4i16(<4 x i16> %a0) {
-; SSE2-LABEL: test_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pminuw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pminuw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umin.v4i16(<4 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v8i16(<8 x i16> %a0) {
-; SSE2-LABEL: test_v8i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vphminposuw %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v16i16(<16 x i16> %a0) {
-; SSE2-LABEL: test_v16i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminuw %xmm1, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umin.v16i16(<16 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v32i16(<32 x i16> %a0) {
-; SSE2-LABEL: test_v32i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pminsw %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: pminsw %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: pminsw %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminuw %xmm3, %xmm1
-; SSE41-NEXT: pminuw %xmm2, %xmm1
-; SSE41-NEXT: pminuw %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpminuw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umin.v32i16(<32 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v64i16(<64 x i16> %a0) {
-; SSE2-LABEL: test_v64i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [32768,32768,32768,32768,32768,32768,32768,32768]
-; SSE2-NEXT: pxor %xmm8, %xmm6
-; SSE2-NEXT: pxor %xmm8, %xmm2
-; SSE2-NEXT: pminsw %xmm6, %xmm2
-; SSE2-NEXT: pxor %xmm8, %xmm4
-; SSE2-NEXT: pminsw %xmm2, %xmm4
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm7
-; SSE2-NEXT: pxor %xmm8, %xmm3
-; SSE2-NEXT: pminsw %xmm7, %xmm3
-; SSE2-NEXT: pxor %xmm8, %xmm5
-; SSE2-NEXT: pminsw %xmm3, %xmm5
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pminsw %xmm5, %xmm1
-; SSE2-NEXT: pminsw %xmm4, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: pminsw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: pminsw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: xorl $32768, %eax # imm = 0x8000
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminuw %xmm7, %xmm3
-; SSE41-NEXT: pminuw %xmm5, %xmm3
-; SSE41-NEXT: pminuw %xmm1, %xmm3
-; SSE41-NEXT: pminuw %xmm6, %xmm2
-; SSE41-NEXT: pminuw %xmm4, %xmm2
-; SSE41-NEXT: pminuw %xmm3, %xmm2
-; SSE41-NEXT: pminuw %xmm0, %xmm2
-; SSE41-NEXT: phminposuw %xmm2, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpminuw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpminuw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpminuw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpminuw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpminuw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpminuw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminuw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpminuw %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpminuw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.umin.v64i16(<64 x i16> %a0)
- ret i16 %1
-}
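
SSE2 provides pminsw but no pminuw, which is why each SSE2 check above biases the operands by 32768, takes a signed minimum, and un-biases the scalar result with the trailing xorl $32768, %eax. A C sketch of the trick (helper name is mine):

#include <stdint.h>

/* umin via signed min: XOR with 0x8000 maps the unsigned order onto
   the signed order, so the minimum can be taken with pminsw and the
   bias undone afterwards -- the final xorl $32768, %eax above. */
static uint16_t umin_via_pminsw(uint16_t a, uint16_t b) {
    int16_t sa = (int16_t)(a ^ 0x8000);
    int16_t sb = (int16_t)(b ^ 0x8000);
    int16_t sm = sa < sb ? sa : sb;
    return (uint16_t)(sm ^ 0x8000);  /* undo the bias */
}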
-
-;
-; vXi8
-;
-
-define i8 @test_v2i8(<2 x i8> %a0) {
-; SSE2-LABEL: test_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v2i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umin.v2i8(<2 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v4i8(<4 x i8> %a0) {
-; SSE2-LABEL: test_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminub %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v4i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umin.v4i8(<4 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pminub %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umin.v8i8(<8 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vphminposuw %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: test_v16i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v32i8(<32 x i8> %a0) {
-; SSE2-LABEL: test_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminub %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: phminposuw %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umin.v32i8(<32 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v64i8(<64 x i8> %a0) {
-; SSE2-LABEL: test_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pminub %xmm3, %xmm1
-; SSE2-NEXT: pminub %xmm2, %xmm1
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminub %xmm3, %xmm1
-; SSE41-NEXT: pminub %xmm2, %xmm1
-; SSE41-NEXT: pminub %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminub %xmm1, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpminub %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umin.v64i8(<64 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v128i8(<128 x i8> %a0) {
-; SSE2-LABEL: test_v128i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pminub %xmm6, %xmm2
-; SSE2-NEXT: pminub %xmm7, %xmm3
-; SSE2-NEXT: pminub %xmm5, %xmm3
-; SSE2-NEXT: pminub %xmm1, %xmm3
-; SSE2-NEXT: pminub %xmm4, %xmm2
-; SSE2-NEXT: pminub %xmm3, %xmm2
-; SSE2-NEXT: pminub %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: pminub %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pminub %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pminub %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v128i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pminub %xmm7, %xmm3
-; SSE41-NEXT: pminub %xmm5, %xmm3
-; SSE41-NEXT: pminub %xmm1, %xmm3
-; SSE41-NEXT: pminub %xmm6, %xmm2
-; SSE41-NEXT: pminub %xmm4, %xmm2
-; SSE41-NEXT: pminub %xmm3, %xmm2
-; SSE41-NEXT: pminub %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pminub %xmm2, %xmm0
-; SSE41-NEXT: phminposuw %xmm0, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v128i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpminub %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpminub %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpminub %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpminub %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpminub %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vphminposuw %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v128i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpminub %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpminub %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vphminposuw %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v128i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpminub %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpminub %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vphminposuw %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.umin.v128i8(<128 x i8> %a0)
- ret i8 %1
-}
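
The vXi8 SSE4.1 paths reuse the 16-bit phminposuw: a psrlw $8 plus pminub first folds each word's high byte into its low byte (the shifted-in high byte meets a zero, so the result's high byte is zero), after which the word-wide horizontal minimum is also the byte-wide minimum. A scalar sketch of that idiom, assuming little-endian byte order within each word:

#include <stdint.h>

/* Byte minimum via the word-wide phminposuw idiom: per word, keep
   min(low byte, high byte) in the low byte and zero the high byte,
   then take the minimum over the eight words. */
static uint8_t umin16_bytes(const uint8_t b[16]) {
    uint16_t words[8];
    for (int i = 0; i < 8; i++) {
        uint8_t lo = b[2 * i], hi = b[2 * i + 1];
        words[i] = lo < hi ? lo : hi;   /* high byte is now zero */
    }
    uint16_t m = words[0];              /* phminposuw equivalent */
    for (int i = 1; i < 8; i++)
        if (words[i] < m)
            m = words[i];
    return (uint8_t)m;
}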
-
-declare i64 @llvm.experimental.vector.reduce.umin.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.umin.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.umin.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.umin.v16i64(<16 x i64>)
-
-declare i32 @llvm.experimental.vector.reduce.umin.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umin.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umin.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umin.v32i32(<32 x i32>)
-
-declare i16 @llvm.experimental.vector.reduce.umin.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umin.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umin.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umin.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.umin.v64i16(<64 x i16>)
-
-declare i8 @llvm.experimental.vector.reduce.umin.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umin.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umin.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umin.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umin.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.umin.v128i8(<128 x i8>)
Removed: llvm/trunk/test/CodeGen/X86/vector-reduce-xor-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-xor-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-xor-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-xor-widen.ll (removed)
@@ -1,1168 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512
-
-;
-; vXi64
-;
-
-define i64 @test_v2i64(<2 x i64> %a0) {
-; SSE-LABEL: test_v2i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v4i64(<4 x i64> %a0) {
-; SSE-LABEL: test_v4i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v4i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v8i64(<8 x i64> %a0) {
-; SSE-LABEL: test_v8i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm3, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm1
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v8i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.xor.v8i64(<8 x i64> %a0)
- ret i64 %1
-}
-
-define i64 @test_v16i64(<16 x i64> %a0) {
-; SSE-LABEL: test_v16i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm6, %xmm2
-; SSE-NEXT: pxor %xmm7, %xmm3
-; SSE-NEXT: pxor %xmm5, %xmm3
-; SSE-NEXT: pxor %xmm1, %xmm3
-; SSE-NEXT: pxor %xmm4, %xmm2
-; SSE-NEXT: pxor %xmm3, %xmm2
-; SSE-NEXT: pxor %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE-NEXT: pxor %xmm2, %xmm0
-; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v16i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpxor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i64 @llvm.experimental.vector.reduce.xor.v16i64(<16 x i64> %a0)
- ret i64 %1
-}
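
XOR is associative and commutative with identity 0, so the schedules above are free to pair the input registers in any order before the final shuffles. A scalar model, sketch only:

#include <stddef.h>
#include <stdint.h>

/* Scalar model of llvm.experimental.vector.reduce.xor on i64: any
   association order yields the same value, which is why the SSE and
   AVX check lines above may combine the registers arbitrarily. */
static uint64_t xor_reduce_u64(const uint64_t *v, size_t n) {
    uint64_t acc = 0;  /* identity for XOR */
    for (size_t i = 0; i < n; i++)
        acc ^= v[i];
    return acc;
}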
-
-;
-; vXi32
-;
-
-define i32 @test_v2i32(<2 x i32> %a0) {
-; SSE-LABEL: test_v2i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v4i32(<4 x i32> %a0) {
-; SSE-LABEL: test_v4i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v8i32(<8 x i32> %a0) {
-; SSE-LABEL: test_v8i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v8i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v16i32(<16 x i32> %a0) {
-; SSE-LABEL: test_v16i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm3, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm1
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v16i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.xor.v16i32(<16 x i32> %a0)
- ret i32 %1
-}
-
-define i32 @test_v32i32(<32 x i32> %a0) {
-; SSE-LABEL: test_v32i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm6, %xmm2
-; SSE-NEXT: pxor %xmm7, %xmm3
-; SSE-NEXT: pxor %xmm5, %xmm3
-; SSE-NEXT: pxor %xmm1, %xmm3
-; SSE-NEXT: pxor %xmm4, %xmm2
-; SSE-NEXT: pxor %xmm3, %xmm2
-; SSE-NEXT: pxor %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE-NEXT: pxor %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v32i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpxor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i32 @llvm.experimental.vector.reduce.xor.v32i32(<32 x i32> %a0)
- ret i32 %1
-}
-
-;
-; vXi16
-;
-
-define i16 @test_v2i16(<2 x i16> %a0) {
-; SSE-LABEL: test_v2i16:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.xor.v2i16(<2 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v4i16(<4 x i16> %a0) {
-; SSE-LABEL: test_v4i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v8i16(<8 x i16> %a0) {
-; SSE-LABEL: test_v8i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX-LABEL: test_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v16i16(<16 x i16> %a0) {
-; SSE-LABEL: test_v16i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v16i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v32i16(<32 x i16> %a0) {
-; SSE-LABEL: test_v32i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm3, %xmm1
-; SSE-NEXT: pxor %xmm2, %xmm1
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v32i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.xor.v32i16(<32 x i16> %a0)
- ret i16 %1
-}
-
-define i16 @test_v64i16(<64 x i16> %a0) {
-; SSE-LABEL: test_v64i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm6, %xmm2
-; SSE-NEXT: pxor %xmm7, %xmm3
-; SSE-NEXT: pxor %xmm5, %xmm3
-; SSE-NEXT: pxor %xmm1, %xmm3
-; SSE-NEXT: pxor %xmm4, %xmm2
-; SSE-NEXT: pxor %xmm3, %xmm2
-; SSE-NEXT: pxor %xmm0, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE-NEXT: pxor %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: test_v64i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpxor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i16 @llvm.experimental.vector.reduce.xor.v64i16(<64 x i16> %a0)
- ret i16 %1
-}
-
-;
-; vXi8
-;
-
-define i8 @test_v2i8(<2 x i8> %a0) {
-; SSE2-LABEL: test_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.xor.v2i8(<2 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v4i8(<4 x i8> %a0) {
-; SSE2-LABEL: test_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.xor.v4i8(<4 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: test_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpextrb $0, %xmm0, %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
-; AVX-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v32i8(<32 x i8> %a0) {
-; SSE2-LABEL: test_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: pextrb $0, %xmm0, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v32i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v32i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v32i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v64i8(<64 x i8> %a0) {
-; SSE2-LABEL: test_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm3, %xmm1
-; SSE41-NEXT: pxor %xmm2, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v64i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v64i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v64i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.xor.v64i8(<64 x i8> %a0)
- ret i8 %1
-}
-
-define i8 @test_v128i8(<128 x i8> %a0) {
-; SSE2-LABEL: test_v128i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm6, %xmm2
-; SSE2-NEXT: pxor %xmm7, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v128i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm6, %xmm2
-; SSE41-NEXT: pxor %xmm7, %xmm3
-; SSE41-NEXT: pxor %xmm5, %xmm3
-; SSE41-NEXT: pxor %xmm1, %xmm3
-; SSE41-NEXT: pxor %xmm4, %xmm2
-; SSE41-NEXT: pxor %xmm3, %xmm2
-; SSE41-NEXT: pxor %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm1
-; SSE41-NEXT: pextrb $0, %xmm1, %eax
-; SSE41-NEXT: # kill: def $al killed $al killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v128i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: def $al killed $al killed $eax
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v128i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpxor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; AVX2-NEXT: # kill: def $al killed $al killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v128i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: def $al killed $al killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
- %1 = call i8 @llvm.experimental.vector.reduce.xor.v128i8(<128 x i8> %a0)
- ret i8 %1
-}
-
-declare i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.xor.v8i64(<8 x i64>)
-declare i64 @llvm.experimental.vector.reduce.xor.v16i64(<16 x i64>)
-
-declare i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.xor.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.xor.v32i32(<32 x i32>)
-
-declare i16 @llvm.experimental.vector.reduce.xor.v2i16(<2 x i16>)
-declare i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.xor.v32i16(<32 x i16>)
-declare i16 @llvm.experimental.vector.reduce.xor.v64i16(<64 x i16>)
-
-declare i8 @llvm.experimental.vector.reduce.xor.v2i8(<2 x i8>)
-declare i8 @llvm.experimental.vector.reduce.xor.v4i8(<4 x i8>)
-declare i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.xor.v64i8(<64 x i8>)
-declare i8 @llvm.experimental.vector.reduce.xor.v128i8(<128 x i8>)
Removed: llvm/trunk/test/CodeGen/X86/vector-sext-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-sext-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-sext-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-sext-widen.ll (removed)
@@ -1,3966 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
-;
-; Just two 32-bit runs to make sure we do reasonable things there.
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X32-SSE,X32-SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=X32-SSE,X32-SSE41
-
-define <8 x i16> @sext_16i8_to_8i16(<16 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_16i8_to_8i16:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_16i8_to_8i16:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: psraw $8, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_16i8_to_8i16:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: sext_16i8_to_8i16:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_16i8_to_8i16:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: psraw $8, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_16i8_to_8i16:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %C = sext <8 x i8> %B to <8 x i16>
- ret <8 x i16> %C
-}
-
-define <16 x i16> @sext_16i8_to_16i16(<16 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_16i8_to_16i16:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_16i8_to_16i16:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSSE3-NEXT: psraw $8, %xmm2
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSSE3-NEXT: psraw $8, %xmm1
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_16i8_to_16i16:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbw %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxbw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_16i8_to_16i16:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_16i8_to_16i16:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_16i8_to_16i16:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_16i8_to_16i16:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X32-SSE2-NEXT: psraw $8, %xmm2
-; X32-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; X32-SSE2-NEXT: psraw $8, %xmm1
-; X32-SSE2-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_16i8_to_16i16:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm1
-; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = sext <16 x i8> %A to <16 x i16>
- ret <16 x i16> %B
-}
-
-define <32 x i16> @sext_32i8_to_32i16(<32 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_32i8_to_32i16:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: psraw $8, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; SSE2-NEXT: psraw $8, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_32i8_to_32i16:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSSE3-NEXT: psraw $8, %xmm4
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; SSSE3-NEXT: psraw $8, %xmm5
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSSE3-NEXT: psraw $8, %xmm2
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; SSSE3-NEXT: psraw $8, %xmm3
-; SSSE3-NEXT: movdqa %xmm4, %xmm0
-; SSSE3-NEXT: movdqa %xmm5, %xmm1
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_32i8_to_32i16:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbw %xmm0, %xmm5
-; SSE41-NEXT: pmovsxbw %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxbw %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovsxbw %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm4, %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_32i8_to_32i16:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
-; AVX1-NEXT: vmovaps %ymm2, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_32i8_to_32i16:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm1
-; AVX2-NEXT: vmovdqa %ymm2, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: sext_32i8_to_32i16:
-; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm2
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm1
-; AVX512F-NEXT: vmovdqa %ymm2, %ymm0
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: sext_32i8_to_32i16:
-; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
-; AVX512BW-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_32i8_to_32i16:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; X32-SSE2-NEXT: psraw $8, %xmm4
-; X32-SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; X32-SSE2-NEXT: psraw $8, %xmm5
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; X32-SSE2-NEXT: psraw $8, %xmm2
-; X32-SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; X32-SSE2-NEXT: psraw $8, %xmm3
-; X32-SSE2-NEXT: movdqa %xmm4, %xmm0
-; X32-SSE2-NEXT: movdqa %xmm5, %xmm1
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_32i8_to_32i16:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm5
-; X32-SSE41-NEXT: pmovsxbw %xmm1, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm4
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm3
-; X32-SSE41-NEXT: movdqa %xmm5, %xmm0
-; X32-SSE41-NEXT: movdqa %xmm4, %xmm1
-; X32-SSE41-NEXT: retl
-entry:
- %B = sext <32 x i8> %A to <32 x i16>
- ret <32 x i16> %B
-}
-
-define <4 x i32> @sext_16i8_to_4i32(<16 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_16i8_to_4i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: psrad $24, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_16i8_to_4i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: psrad $24, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_16i8_to_4i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbd %xmm0, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: sext_16i8_to_4i32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_16i8_to_4i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: psrad $24, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_16i8_to_4i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <16 x i8> %A, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %C = sext <4 x i8> %B to <4 x i32>
- ret <4 x i32> %C
-}
-
-define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_16i8_to_8i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: psrad $24, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: psrad $24, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_16i8_to_8i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSSE3-NEXT: psrad $24, %xmm2
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT: psrad $24, %xmm1
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_16i8_to_8i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmovsxbd %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_16i8_to_8i32:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_16i8_to_8i32:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_16i8_to_8i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxbd %xmm0, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_16i8_to_8i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $24, %xmm2
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X32-SSE2-NEXT: psrad $24, %xmm1
-; X32-SSE2-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_16i8_to_8i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm1
-; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %C = sext <8 x i8> %B to <8 x i32>
- ret <8 x i32> %C
-}
-
-define <16 x i32> @sext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_16i8_to_16i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE2-NEXT: psrad $24, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $24, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: psrad $24, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: psrad $24, %xmm3
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_16i8_to_16i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSSE3-NEXT: psrad $24, %xmm4
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: psrad $24, %xmm1
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSSE3-NEXT: psrad $24, %xmm2
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSSE3-NEXT: psrad $24, %xmm3
-; SSSE3-NEXT: movdqa %xmm4, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_16i8_to_16i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbd %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmovsxbd %xmm1, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxbd %xmm2, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE41-NEXT: pmovsxbd %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_16i8_to_16i32:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmovsxbd %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,0,1]
-; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
-; AVX1-NEXT: vmovaps %ymm2, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_16i8_to_16i32:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbd %xmm0, %ymm2
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmovsxbd %xmm0, %ymm1
-; AVX2-NEXT: vmovdqa %ymm2, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_16i8_to_16i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_16i8_to_16i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; X32-SSE2-NEXT: psrad $24, %xmm4
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: psrad $24, %xmm1
-; X32-SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $24, %xmm2
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X32-SSE2-NEXT: psrad $24, %xmm3
-; X32-SSE2-NEXT: movdqa %xmm4, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_16i8_to_16i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm4
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; X32-SSE41-NEXT: pmovsxbd %xmm1, %xmm1
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxbd %xmm2, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm3
-; X32-SSE41-NEXT: movdqa %xmm4, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = sext <16 x i8> %A to <16 x i32>
- ret <16 x i32> %B
-}
-
-define <2 x i64> @sext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_16i8_to_2i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: psrad $24, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_16i8_to_2i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
-; SSSE3-NEXT: psrad $24, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_16i8_to_2i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbq %xmm0, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: sext_16i8_to_2i64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxbq %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_16i8_to_2i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: pxor %xmm1, %xmm1
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; X32-SSE2-NEXT: psrad $24, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_16i8_to_2i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <16 x i8> %A, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
- %C = sext <2 x i8> %B to <2 x i64>
- ret <2 x i64> %C
-}
-
-define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_16i8_to_4i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $24, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_16i8_to_4i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: psrad $24, %xmm1
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_16i8_to_4i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbq %xmm0, %xmm2
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pmovsxbq %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_16i8_to_4i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbq %xmm0, %xmm1
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxbq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_16i8_to_4i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbq %xmm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_16i8_to_4i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxbq %xmm0, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_16i8_to_4i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $24, %xmm1
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_16i8_to_4i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm2
-; X32-SSE41-NEXT: psrld $16, %xmm0
-; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm1
-; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <16 x i8> %A, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %C = sext <4 x i8> %B to <4 x i64>
- ret <4 x i64> %C
-}
-
-define <8 x i64> @sext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_16i8_to_8i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $24, %xmm1
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: psrad $24, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_16i8_to_8i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: psrad $24, %xmm1
-; SSSE3-NEXT: pxor %xmm5, %xmm5
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: movdqa %xmm1, %xmm4
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSSE3-NEXT: psrad $24, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
-; SSSE3-NEXT: movdqa %xmm3, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; SSSE3-NEXT: movdqa %xmm4, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_16i8_to_8i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbq %xmm0, %xmm4
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pmovsxbq %xmm1, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmovsxbq %xmm2, %xmm2
-; SSE41-NEXT: psrlq $48, %xmm0
-; SSE41-NEXT: pmovsxbq %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_16i8_to_8i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbq %xmm0, %xmm1
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
-; AVX1-NEXT: vpmovsxbq %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmovsxbq %xmm0, %xmm1
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxbq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
-; AVX1-NEXT: vmovaps %ymm2, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_16i8_to_8i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbq %xmm0, %ymm2
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX2-NEXT: vpmovsxbq %xmm0, %ymm1
-; AVX2-NEXT: vmovdqa %ymm2, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_16i8_to_8i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxbq %xmm0, %zmm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_16i8_to_8i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $24, %xmm1
-; X32-SSE2-NEXT: pxor %xmm5, %xmm5
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm4
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X32-SSE2-NEXT: psrad $24, %xmm3
-; X32-SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; X32-SSE2-NEXT: movdqa %xmm3, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; X32-SSE2-NEXT: movdqa %xmm4, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_16i8_to_8i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm4
-; X32-SSE41-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE41-NEXT: psrld $16, %xmm1
-; X32-SSE41-NEXT: pmovsxbq %xmm1, %xmm1
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; X32-SSE41-NEXT: pmovsxbq %xmm2, %xmm2
-; X32-SSE41-NEXT: psrlq $48, %xmm0
-; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm3
-; X32-SSE41-NEXT: movdqa %xmm4, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %C = sext <8 x i8> %B to <8 x i64>
- ret <8 x i64> %C
-}
-
-define <4 x i32> @sext_8i16_to_4i32(<8 x i16> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_8i16_to_4i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_8i16_to_4i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: psrad $16, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_8i16_to_4i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: sext_8i16_to_4i32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_8i16_to_4i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: psrad $16, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_8i16_to_4i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %C = sext <4 x i16> %B to <4 x i32>
- ret <4 x i32> %C
-}
-
-define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_8i16_to_8i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_8i16_to_8i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSSE3-NEXT: psrad $16, %xmm2
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSSE3-NEXT: psrad $16, %xmm1
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_8i16_to_8i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_8i16_to_8i32:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_8i16_to_8i32:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_8i16_to_8i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_8i16_to_8i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $16, %xmm2
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X32-SSE2-NEXT: psrad $16, %xmm1
-; X32-SSE2-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_8i16_to_8i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm1
-; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = sext <8 x i16> %A to <8 x i32>
- ret <8 x i32> %B
-}
-
-define <16 x i32> @sext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_16i16_to_16i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_16i16_to_16i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSSE3-NEXT: psrad $16, %xmm4
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSSE3-NEXT: psrad $16, %xmm5
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSSE3-NEXT: psrad $16, %xmm2
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSSE3-NEXT: psrad $16, %xmm3
-; SSSE3-NEXT: movdqa %xmm4, %xmm0
-; SSSE3-NEXT: movdqa %xmm5, %xmm1
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_16i16_to_16i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm5
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm4, %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_16i16_to_16i32:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
-; AVX1-NEXT: vmovaps %ymm2, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_16i16_to_16i32:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm1
-; AVX2-NEXT: vmovdqa %ymm2, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_16i16_to_16i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_16i16_to_16i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $16, %xmm4
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; X32-SSE2-NEXT: psrad $16, %xmm5
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; X32-SSE2-NEXT: psrad $16, %xmm2
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; X32-SSE2-NEXT: psrad $16, %xmm3
-; X32-SSE2-NEXT: movdqa %xmm4, %xmm0
-; X32-SSE2-NEXT: movdqa %xmm5, %xmm1
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_16i16_to_16i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm5
-; X32-SSE41-NEXT: pmovsxwd %xmm1, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm4
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxwd %xmm0, %xmm3
-; X32-SSE41-NEXT: movdqa %xmm5, %xmm0
-; X32-SSE41-NEXT: movdqa %xmm4, %xmm1
-; X32-SSE41-NEXT: retl
-entry:
- %B = sext <16 x i16> %A to <16 x i32>
- ret <16 x i32> %B
-}
-
-define <2 x i64> @sext_8i16_to_2i64(<8 x i16> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_8i16_to_2i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_8i16_to_2i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
-; SSSE3-NEXT: psrad $16, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_8i16_to_2i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwq %xmm0, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: sext_8i16_to_2i64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxwq %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_8i16_to_2i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: pxor %xmm1, %xmm1
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; X32-SSE2-NEXT: psrad $16, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_8i16_to_2i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxwq %xmm0, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <8 x i16> %A, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
- %C = sext <2 x i16> %B to <2 x i64>
- ret <2 x i64> %C
-}
-
-define <4 x i64> @sext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_8i16_to_4i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_8i16_to_4i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: psrad $16, %xmm1
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_8i16_to_4i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwq %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmovsxwq %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_8i16_to_4i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxwq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmovsxwq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_8i16_to_4i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxwq %xmm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_8i16_to_4i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxwq %xmm0, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_8i16_to_4i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $16, %xmm1
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_8i16_to_4i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxwq %xmm0, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X32-SSE41-NEXT: pmovsxwq %xmm0, %xmm1
-; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %C = sext <4 x i16> %B to <4 x i64>
- ret <4 x i64> %C
-}
-
-define <8 x i64> @sext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_8i16_to_8i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_8i16_to_8i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: psrad $16, %xmm1
-; SSSE3-NEXT: pxor %xmm5, %xmm5
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: movdqa %xmm1, %xmm4
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSSE3-NEXT: psrad $16, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
-; SSSE3-NEXT: movdqa %xmm3, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; SSSE3-NEXT: movdqa %xmm4, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_8i16_to_8i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwq %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmovsxwq %xmm1, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxwq %xmm2, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE41-NEXT: pmovsxwq %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_8i16_to_8i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxwq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmovsxwq %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxwq %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,0,1]
-; AVX1-NEXT: vpmovsxwq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
-; AVX1-NEXT: vmovaps %ymm2, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_8i16_to_8i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxwq %xmm0, %ymm2
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX2-NEXT: vpmovsxwq %xmm0, %ymm1
-; AVX2-NEXT: vmovdqa %ymm2, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_8i16_to_8i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxwq %xmm0, %zmm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_8i16_to_8i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $16, %xmm1
-; X32-SSE2-NEXT: pxor %xmm5, %xmm5
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm4
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; X32-SSE2-NEXT: psrad $16, %xmm3
-; X32-SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; X32-SSE2-NEXT: movdqa %xmm3, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; X32-SSE2-NEXT: movdqa %xmm4, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_8i16_to_8i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxwq %xmm0, %xmm4
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; X32-SSE41-NEXT: pmovsxwq %xmm1, %xmm1
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxwq %xmm2, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; X32-SSE41-NEXT: pmovsxwq %xmm0, %xmm3
-; X32-SSE41-NEXT: movdqa %xmm4, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = sext <8 x i16> %A to <8 x i64>
- ret <8 x i64> %B
-}
-
-define <2 x i64> @sext_4i32_to_2i64(<4 x i32> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_4i32_to_2i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_4i32_to_2i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_4i32_to_2i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: sext_4i32_to_2i64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_4i32_to_2i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: pxor %xmm1, %xmm1
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_4i32_to_2i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = shufflevector <4 x i32> %A, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
- %C = sext <2 x i32> %B to <2 x i64>
- ret <2 x i64> %C
-}
-
-define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_4i32_to_4i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_4i32_to_4i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pxor %xmm3, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_4i32_to_4i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_4i32_to_4i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_4i32_to_4i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_4i32_to_4i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_4i32_to_4i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pxor %xmm3, %xmm3
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_4i32_to_4i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm1
-; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %B = sext <4 x i32> %A to <4 x i64>
- ret <4 x i64> %B
-}
-
-define <8 x i64> @sext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_8i32_to_8i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_8i32_to_8i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movdqa %xmm1, %xmm2
-; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: pxor %xmm3, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
-; SSSE3-NEXT: pxor %xmm5, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm5
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSSE3-NEXT: pxor %xmm3, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; SSSE3-NEXT: pcmpgtd %xmm3, %xmm4
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_8i32_to_8i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm5
-; SSE41-NEXT: pmovsxdq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm4, %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_8i32_to_8i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
-; AVX1-NEXT: vmovaps %ymm2, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_8i32_to_8i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm1
-; AVX2-NEXT: vmovdqa %ymm2, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_8i32_to_8i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_8i32_to_8i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE2-NEXT: pxor %xmm4, %xmm4
-; X32-SSE2-NEXT: pxor %xmm3, %xmm3
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; X32-SSE2-NEXT: pxor %xmm5, %xmm5
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm5
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-SSE2-NEXT: pxor %xmm3, %xmm3
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; X32-SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_8i32_to_8i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm5
-; X32-SSE41-NEXT: pmovsxdq %xmm1, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm4
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm3
-; X32-SSE41-NEXT: movdqa %xmm5, %xmm0
-; X32-SSE41-NEXT: movdqa %xmm4, %xmm1
-; X32-SSE41-NEXT: retl
-entry:
- %B = sext <8 x i32> %A to <8 x i64>
- ret <8 x i64> %B
-}
-
-define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
-; SSE-LABEL: load_sext_2i1_to_2i64:
-; SSE: # %bb.0: # %entry
-; SSE-NEXT: movzbl (%rdi), %eax
-; SSE-NEXT: movq %rax, %rcx
-; SSE-NEXT: shlq $62, %rcx
-; SSE-NEXT: movq %rcx, %xmm0
-; SSE-NEXT: shlq $63, %rax
-; SSE-NEXT: movq %rax, %xmm1
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT: psrad $31, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: load_sext_2i1_to_2i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: movzbl (%rdi), %eax
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $62, %rcx
-; AVX1-NEXT: vmovq %rcx, %xmm0
-; AVX1-NEXT: shlq $63, %rax
-; AVX1-NEXT: vmovq %rax, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_2i1_to_2i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: movzbl (%rdi), %eax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $62, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm0
-; AVX2-NEXT: shlq $63, %rax
-; AVX2-NEXT: vmovq %rax, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_2i1_to_2i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_2i1_to_2i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movzbl (%eax), %eax
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: shll $30, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
-; X32-SSE2-NEXT: shll $31, %eax
-; X32-SSE2-NEXT: movd %eax, %xmm0
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT: psrad $31, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_2i1_to_2i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movzbl (%eax), %eax
-; X32-SSE41-NEXT: movl %eax, %ecx
-; X32-SSE41-NEXT: shll $31, %ecx
-; X32-SSE41-NEXT: movd %ecx, %xmm0
-; X32-SSE41-NEXT: pinsrd $1, %ecx, %xmm0
-; X32-SSE41-NEXT: shll $30, %eax
-; X32-SSE41-NEXT: pinsrd $2, %eax, %xmm0
-; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm0
-; X32-SSE41-NEXT: psrad $31, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <2 x i1>, <2 x i1>* %ptr
- %Y = sext <2 x i1> %X to <2 x i64>
- ret <2 x i64> %Y
-}
-
-define <2 x i64> @load_sext_2i8_to_2i64(<2 x i8> *%ptr) {
-; SSE2-LABEL: load_sext_2i8_to_2i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movzwl (%rdi), %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: psrad $24, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_2i8_to_2i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movzwl (%rdi), %eax
-; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
-; SSSE3-NEXT: psrad $24, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_2i8_to_2i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: load_sext_2i8_to_2i64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxbq (%rdi), %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_2i8_to_2i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movzwl (%eax), %eax
-; X32-SSE2-NEXT: movd %eax, %xmm0
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: pxor %xmm1, %xmm1
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; X32-SSE2-NEXT: psrad $24, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_2i8_to_2i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <2 x i8>, <2 x i8>* %ptr
- %Y = sext <2 x i8> %X to <2 x i64>
- ret <2 x i64> %Y
-}
-
-define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
-; SSE2-LABEL: load_sext_4i1_to_4i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movl (%rdi), %eax
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shlq $60, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movd %ecx, %xmm0
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shlq $61, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shlq $62, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movd %ecx, %xmm2
-; SSE2-NEXT: shlq $63, %rax
-; SSE2-NEXT: sarq $63, %rax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_4i1_to_4i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movl (%rdi), %eax
-; SSSE3-NEXT: movq %rax, %rcx
-; SSSE3-NEXT: shlq $60, %rcx
-; SSSE3-NEXT: sarq $63, %rcx
-; SSSE3-NEXT: movd %ecx, %xmm0
-; SSSE3-NEXT: movq %rax, %rcx
-; SSSE3-NEXT: shlq $61, %rcx
-; SSSE3-NEXT: sarq $63, %rcx
-; SSSE3-NEXT: movd %ecx, %xmm1
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSSE3-NEXT: movq %rax, %rcx
-; SSSE3-NEXT: shlq $62, %rcx
-; SSSE3-NEXT: sarq $63, %rcx
-; SSSE3-NEXT: movd %ecx, %xmm2
-; SSSE3-NEXT: shlq $63, %rax
-; SSSE3-NEXT: sarq $63, %rax
-; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_4i1_to_4i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: movl (%rdi), %eax
-; SSE41-NEXT: movq %rax, %rcx
-; SSE41-NEXT: shlq $62, %rcx
-; SSE41-NEXT: sarq $63, %rcx
-; SSE41-NEXT: movq %rax, %rdx
-; SSE41-NEXT: shlq $63, %rdx
-; SSE41-NEXT: sarq $63, %rdx
-; SSE41-NEXT: movd %edx, %xmm0
-; SSE41-NEXT: pinsrd $1, %ecx, %xmm0
-; SSE41-NEXT: movq %rax, %rcx
-; SSE41-NEXT: shlq $61, %rcx
-; SSE41-NEXT: sarq $63, %rcx
-; SSE41-NEXT: pinsrd $2, %ecx, %xmm0
-; SSE41-NEXT: shlq $60, %rax
-; SSE41-NEXT: sarq $63, %rax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_4i1_to_4i32:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: movl (%rdi), %eax
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $62, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rax, %rdx
-; AVX1-NEXT: shlq $63, %rdx
-; AVX1-NEXT: sarq $63, %rdx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $61, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shlq $60, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_4i1_to_4i32:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: movl (%rdi), %eax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $62, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rax, %rdx
-; AVX2-NEXT: shlq $63, %rdx
-; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vmovd %edx, %xmm0
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $61, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX2-NEXT: shlq $60, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_4i1_to_4i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_4i1_to_4i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl (%eax), %eax
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: shll $28, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: shll $29, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: shll $30, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm2
-; X32-SSE2-NEXT: shll $31, %eax
-; X32-SSE2-NEXT: movd %eax, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT: psrad $31, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_4i1_to_4i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movl (%eax), %eax
-; X32-SSE41-NEXT: movl %eax, %ecx
-; X32-SSE41-NEXT: shll $30, %ecx
-; X32-SSE41-NEXT: movl %eax, %edx
-; X32-SSE41-NEXT: shll $31, %edx
-; X32-SSE41-NEXT: movd %edx, %xmm0
-; X32-SSE41-NEXT: pinsrd $1, %ecx, %xmm0
-; X32-SSE41-NEXT: movl %eax, %ecx
-; X32-SSE41-NEXT: shll $29, %ecx
-; X32-SSE41-NEXT: pinsrd $2, %ecx, %xmm0
-; X32-SSE41-NEXT: shll $28, %eax
-; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm0
-; X32-SSE41-NEXT: psrad $31, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <4 x i1>, <4 x i1>* %ptr
- %Y = sext <4 x i1> %X to <4 x i32>
- ret <4 x i32> %Y
-}
-
-define <4 x i32> @load_sext_4i8_to_4i32(<4 x i8> *%ptr) {
-; SSE2-LABEL: load_sext_4i8_to_4i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: psrad $24, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_4i8_to_4i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: psrad $24, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_4i8_to_4i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: load_sext_4i8_to_4i32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxbd (%rdi), %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_4i8_to_4i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: psrad $24, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_4i8_to_4i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxbd (%eax), %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <4 x i8>, <4 x i8>* %ptr
- %Y = sext <4 x i8> %X to <4 x i32>
- ret <4 x i32> %Y
-}
-
-define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
-; SSE2-LABEL: load_sext_4i1_to_4i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movl (%rdi), %eax
-; SSE2-NEXT: movl %eax, %ecx
-; SSE2-NEXT: shrl $3, %ecx
-; SSE2-NEXT: movd %ecx, %xmm0
-; SSE2-NEXT: movl %eax, %ecx
-; SSE2-NEXT: shrl $2, %ecx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: shrl %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
-; SSE2-NEXT: psllq $63, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
-; SSE2-NEXT: psllq $63, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_4i1_to_4i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movl (%rdi), %eax
-; SSSE3-NEXT: movl %eax, %ecx
-; SSSE3-NEXT: shrl $3, %ecx
-; SSSE3-NEXT: movd %ecx, %xmm0
-; SSSE3-NEXT: movl %eax, %ecx
-; SSSE3-NEXT: shrl $2, %ecx
-; SSSE3-NEXT: movd %ecx, %xmm1
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSSE3-NEXT: movd %eax, %xmm2
-; SSSE3-NEXT: shrl %eax
-; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSSE3-NEXT: pand {{.*}}(%rip), %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
-; SSSE3-NEXT: psllq $63, %xmm0
-; SSSE3-NEXT: psrad $31, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
-; SSSE3-NEXT: psllq $63, %xmm1
-; SSSE3-NEXT: psrad $31, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_4i1_to_4i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: movl (%rdi), %eax
-; SSE41-NEXT: movl %eax, %ecx
-; SSE41-NEXT: shrl %ecx
-; SSE41-NEXT: movd %eax, %xmm1
-; SSE41-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE41-NEXT: movl %eax, %ecx
-; SSE41-NEXT: shrl $2, %ecx
-; SSE41-NEXT: pinsrd $2, %ecx, %xmm1
-; SSE41-NEXT: shrl $3, %eax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm1
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: psllq $63, %xmm0
-; SSE41-NEXT: psrad $31, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; SSE41-NEXT: psllq $63, %xmm1
-; SSE41-NEXT: psrad $31, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_4i1_to_4i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: movl (%rdi), %eax
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $62, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rax, %rdx
-; AVX1-NEXT: shlq $63, %rdx
-; AVX1-NEXT: sarq $63, %rdx
-; AVX1-NEXT: vmovd %edx, %xmm0
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: shlq $61, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shlq $60, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_4i1_to_4i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: movl (%rdi), %eax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $60, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm0
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $61, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: shlq $62, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: shlq $63, %rax
-; AVX2-NEXT: vmovq %rax, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_4i1_to_4i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_4i1_to_4i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movzbl (%eax), %eax
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: shrl $3, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: shrl $2, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT: movd %eax, %xmm2
-; X32-SSE2-NEXT: shrl %eax
-; X32-SSE2-NEXT: movd %eax, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; X32-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
-; X32-SSE2-NEXT: psllq $63, %xmm0
-; X32-SSE2-NEXT: psrad $31, %xmm0
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
-; X32-SSE2-NEXT: psllq $63, %xmm1
-; X32-SSE2-NEXT: psrad $31, %xmm1
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_4i1_to_4i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movzbl (%eax), %eax
-; X32-SSE41-NEXT: movl %eax, %ecx
-; X32-SSE41-NEXT: shrl %ecx
-; X32-SSE41-NEXT: movd %eax, %xmm1
-; X32-SSE41-NEXT: pinsrd $1, %ecx, %xmm1
-; X32-SSE41-NEXT: movl %eax, %ecx
-; X32-SSE41-NEXT: shrl $2, %ecx
-; X32-SSE41-NEXT: pinsrd $2, %ecx, %xmm1
-; X32-SSE41-NEXT: shrl $3, %eax
-; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm1
-; X32-SSE41-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; X32-SSE41-NEXT: psllq $63, %xmm0
-; X32-SSE41-NEXT: psrad $31, %xmm0
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; X32-SSE41-NEXT: psllq $63, %xmm1
-; X32-SSE41-NEXT: psrad $31, %xmm1
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <4 x i1>, <4 x i1>* %ptr
- %Y = sext <4 x i1> %X to <4 x i64>
- ret <4 x i64> %Y
-}
-
-define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
-; SSE2-LABEL: load_sext_4i8_to_4i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $24, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_4i8_to_4i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: psrad $24, %xmm1
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_4i8_to_4i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
-; SSE41-NEXT: pmovsxbq 2(%rdi), %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_4i8_to_4i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbq 2(%rdi), %xmm0
-; AVX1-NEXT: vpmovsxbq (%rdi), %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_4i8_to_4i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbq (%rdi), %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_4i8_to_4i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxbq (%rdi), %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_4i8_to_4i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $24, %xmm1
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_4i8_to_4i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
-; X32-SSE41-NEXT: pmovsxbq 2(%eax), %xmm1
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <4 x i8>, <4 x i8>* %ptr
- %Y = sext <4 x i8> %X to <4 x i64>
- ret <4 x i64> %Y
-}
-
-define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
-; SSE2-LABEL: load_sext_4i8_to_4i64_extract:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: psrad $24, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_4i8_to_4i64_extract:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: psrad $24, %xmm0
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_4i8_to_4i64_extract:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovsxbq 2(%rdi), %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_4i8_to_4i64_extract:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovsxbq 2(%rdi), %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_4i8_to_4i64_extract:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxbq (%rdi), %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_4i8_to_4i64_extract:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxbq (%rdi), %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_4i8_to_4i64_extract:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: psrad $24, %xmm0
-; X32-SSE2-NEXT: pxor %xmm1, %xmm1
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_4i8_to_4i64_extract:
-; X32-SSE41: # %bb.0:
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxbq 2(%eax), %xmm0
-; X32-SSE41-NEXT: retl
- %ld = load <4 x i8>, <4 x i8>* %ptr
- %sext = sext <4 x i8> %ld to <4 x i64>
- %extract = shufflevector <4 x i64> %sext, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
- ret <2 x i64> %extract
-}
-
-define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
-; SSE-LABEL: load_sext_8i1_to_8i16:
-; SSE: # %bb.0: # %entry
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pcmpeqw %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: load_sext_8i1_to_8i16:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_8i1_to_8i16:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpbroadcastw (%rdi), %xmm0
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_sext_8i1_to_8i16:
-; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: kmovw (%rdi), %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: load_sext_8i1_to_8i16:
-; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: kmovw (%rdi), %k0
-; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; X32-SSE-LABEL: load_sext_8i1_to_8i16:
-; X32-SSE: # %bb.0: # %entry
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: pcmpeqw %xmm1, %xmm0
-; X32-SSE-NEXT: retl
-entry:
- %X = load <8 x i1>, <8 x i1>* %ptr
- %Y = sext <8 x i1> %X to <8 x i16>
- ret <8 x i16> %Y
-}
-
-define <8 x i16> @load_sext_8i8_to_8i16(<8 x i8> *%ptr) {
-; SSE2-LABEL: load_sext_8i8_to_8i16:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_8i8_to_8i16:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: psraw $8, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_8i8_to_8i16:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: load_sext_8i8_to_8i16:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxbw (%rdi), %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_8i8_to_8i16:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: psraw $8, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_8i8_to_8i16:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxbw (%eax), %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <8 x i8>, <8 x i8>* %ptr
- %Y = sext <8 x i8> %X to <8 x i16>
- ret <8 x i16> %Y
-}
-
-define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
-; SSE2-LABEL: load_sext_8i8_to_8i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: psrad $24, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-NEXT: psrad $24, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_8i8_to_8i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSSE3-NEXT: psrad $24, %xmm1
-; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: pxor %xmm3, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSSE3-NEXT: psrad $24, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm3, %xmm4
-; SSSE3-NEXT: movdqa %xmm3, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_8i8_to_8i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbq (%rdi), %xmm0
-; SSE41-NEXT: pmovsxbq 2(%rdi), %xmm1
-; SSE41-NEXT: pmovsxbq 4(%rdi), %xmm2
-; SSE41-NEXT: pmovsxbq 6(%rdi), %xmm3
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_8i8_to_8i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbq 6(%rdi), %xmm1
-; AVX1-NEXT: vpmovsxbq 4(%rdi), %xmm2
-; AVX1-NEXT: vpmovsxbq 2(%rdi), %xmm0
-; AVX1-NEXT: vpmovsxbq (%rdi), %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_8i8_to_8i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbq (%rdi), %ymm0
-; AVX2-NEXT: vpmovsxbq 4(%rdi), %ymm1
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_8i8_to_8i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxbq (%rdi), %zmm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_8i8_to_8i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X32-SSE2-NEXT: psrad $24, %xmm1
-; X32-SSE2-NEXT: pxor %xmm4, %xmm4
-; X32-SSE2-NEXT: pxor %xmm3, %xmm3
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; X32-SSE2-NEXT: psrad $24, %xmm3
-; X32-SSE2-NEXT: pcmpgtd %xmm3, %xmm4
-; X32-SSE2-NEXT: movdqa %xmm3, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_8i8_to_8i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
-; X32-SSE41-NEXT: pmovsxbq 2(%eax), %xmm1
-; X32-SSE41-NEXT: pmovsxbq 4(%eax), %xmm2
-; X32-SSE41-NEXT: pmovsxbq 6(%eax), %xmm3
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <8 x i8>, <8 x i8>* %ptr
- %Y = sext <8 x i8> %X to <8 x i64>
- ret <8 x i64> %Y
-}
-
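Note on the function above: SSE2/SSSE3 have no pmovsx, so the extension is built in two steps: punpcklbw/punpcklwd place each byte in the top of a 32-bit lane and psrad $24 shifts the sign back down, then pcmpgtd against zero materializes the all-ones high word of each i64. A minimal standalone reproducer (hypothetical function/file names), runnable as "llc < t.ll -mtriple=x86_64-unknown-unknown -mattr=+sse4.1":

define <8 x i64> @repro_8i8_to_8i64(<8 x i8>* %p) {
  %v = load <8 x i8>, <8 x i8>* %p
  %e = sext <8 x i8> %v to <8 x i64>
  ret <8 x i64> %e
}

With SSE4.1, the checks above show this selecting four pmovsxbq loads at byte offsets 0, 2, 4 and 6, one per output register.
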
-define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
-; SSE-LABEL: load_sext_8i1_to_8i32:
-; SSE: # %bb.0: # %entry
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: pcmpeqd %xmm2, %xmm1
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: load_sext_8i1_to_8i32:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_8i1_to_8i32:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpbroadcastd (%rdi), %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_8i1_to_8i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512-NEXT: retq
-;
-; X32-SSE-LABEL: load_sext_8i1_to_8i32:
-; X32-SSE: # %bb.0: # %entry
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
-; X32-SSE-NEXT: pand %xmm2, %xmm1
-; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm1
-; X32-SSE-NEXT: retl
-entry:
- %X = load <8 x i1>, <8 x i1>* %ptr
- %Y = sext <8 x i1> %X to <8 x i32>
- ret <8 x i32> %Y
-}
-
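Note on the function above: the <8 x i1> mask is loaded, broadcast across all lanes, and then tested one bit per lane: pand isolates the lane's bit and pcmpeqd against the same mask constant yields all-ones exactly when the bit was set. In IR terms the expansion of one 4-lane half is equivalent to this sketch (hypothetical name):

define <4 x i32> @bit_test_half(<4 x i32> %bcast) {
  %bits = and <4 x i32> %bcast, <i32 1, i32 2, i32 4, i32 8>
  %set  = icmp eq <4 x i32> %bits, <i32 1, i32 2, i32 4, i32 8>
  %res  = sext <4 x i1> %set to <4 x i32>
  ret <4 x i32> %res
}

AVX512 avoids the bit tests entirely: kmovw loads the eight mask bits straight into k1, and vpternlogd with zero-masking writes -1 into each enabled lane.
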
-define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
-; SSE2-LABEL: load_sext_8i8_to_8i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: psrad $24, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $24, %xmm1
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_8i8_to_8i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: psrad $24, %xmm0
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: psrad $24, %xmm1
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_8i8_to_8i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
-; SSE41-NEXT: pmovsxbd 4(%rdi), %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_8i8_to_8i32:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbd 4(%rdi), %xmm0
-; AVX1-NEXT: vpmovsxbd (%rdi), %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_8i8_to_8i32:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbd (%rdi), %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_8i8_to_8i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxbd (%rdi), %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_8i8_to_8i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X32-SSE2-NEXT: psrad $24, %xmm0
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: psrad $24, %xmm1
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_8i8_to_8i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxbd (%eax), %xmm0
-; X32-SSE41-NEXT: pmovsxbd 4(%eax), %xmm1
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <8 x i8>, <8 x i8>* %ptr
- %Y = sext <8 x i8> %X to <8 x i32>
- ret <8 x i32> %Y
-}
-
-define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
-; SSE2-LABEL: load_sext_16i1_to_16i8:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_16i1_to_16i8:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; SSSE3-NEXT: pand %xmm1, %xmm0
-; SSSE3-NEXT: pcmpeqb %xmm1, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_16i1_to_16i8:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqb %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_16i1_to_16i8:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [-1.7939930131212661E-307,-1.7939930131212661E-307]
-; AVX1-NEXT: # xmm1 = mem[0,0]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_16i1_to_16i8:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_sext_16i1_to_16i8:
-; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: kmovw (%rdi), %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: load_sext_16i1_to_16i8:
-; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: kmovw (%rdi), %k0
-; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_16i1_to_16i8:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X32-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; X32-SSE2-NEXT: pand %xmm1, %xmm0
-; X32-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_16i1_to_16i8:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; X32-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; X32-SSE41-NEXT: pand %xmm1, %xmm0
-; X32-SSE41-NEXT: pcmpeqb %xmm1, %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <16 x i1>, <16 x i1>* %ptr
- %Y = sext <16 x i1> %X to <16 x i8>
- ret <16 x i8> %Y
-}
-
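Note on the AVX1 constant above: it is not really a double. 9241421688590303745 is 0x8040201008040201, i.e. the same per-byte bit masks <1,2,4,8,...,128> used by the SSE paths; AVX1 just happens to materialize it with vmovddup from a 64-bit memory splat, and the autogenerated comment prints the bits as a floating-point value. As an IR-level sanity check (hypothetical global):

@byte_masks = constant <16 x i8> <i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128>
; Bitcast to <2 x i64>, this is <9241421688590303745, 9241421688590303745>.
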
-define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
-; SSE-LABEL: load_sext_16i1_to_16i16:
-; SSE: # %bb.0: # %entry
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pcmpeqw %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [256,512,1024,2048,4096,8192,16384,32768]
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: pcmpeqw %xmm2, %xmm1
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: load_sext_16i1_to_16i16:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_16i1_to_16i16:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpbroadcastw (%rdi), %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_sext_16i1_to_16i16:
-; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: kmovw (%rdi), %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: load_sext_16i1_to_16i16:
-; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: kmovw (%rdi), %k0
-; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512BW-NEXT: retq
-;
-; X32-SSE-LABEL: load_sext_16i1_to_16i16:
-; X32-SSE: # %bb.0: # %entry
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pcmpeqw %xmm2, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [256,512,1024,2048,4096,8192,16384,32768]
-; X32-SSE-NEXT: pand %xmm2, %xmm1
-; X32-SSE-NEXT: pcmpeqw %xmm2, %xmm1
-; X32-SSE-NEXT: retl
-entry:
- %X = load <16 x i1>, <16 x i1>* %ptr
- %Y = sext <16 x i1> %X to <16 x i16>
- ret <16 x i16> %Y
-}
-
-define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
-; SSE-LABEL: load_sext_32i1_to_32i8:
-; SSE: # %bb.0: # %entry
-; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pcmpeqb %xmm2, %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,3,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: pcmpeqb %xmm2, %xmm1
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: load_sext_32i1_to_32i8:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_32i1_to_32i8:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19]
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_sext_32i1_to_32i8:
-; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: kmovw (%rdi), %k1
-; AVX512F-NEXT: kmovw 2(%rdi), %k2
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
-; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: load_sext_32i1_to_32i8:
-; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: kmovd (%rdi), %k0
-; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512BW-NEXT: retq
-;
-; X32-SSE-LABEL: load_sext_32i1_to_32i8:
-; X32-SSE: # %bb.0: # %entry
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm0
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,3,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; X32-SSE-NEXT: pand %xmm2, %xmm1
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm1
-; X32-SSE-NEXT: retl
-entry:
- %X = load <32 x i1>, <32 x i1>* %ptr
- %Y = sext <32 x i1> %X to <32 x i8>
- ret <32 x i8> %Y
-}
-
-define <16 x i16> @load_sext_16i8_to_16i16(<16 x i8> *%ptr) {
-; SSE2-LABEL: load_sext_16i8_to_16i16:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_16i8_to_16i16:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movdqa (%rdi), %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSSE3-NEXT: psraw $8, %xmm0
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSSE3-NEXT: psraw $8, %xmm1
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_16i8_to_16i16:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
-; SSE41-NEXT: pmovsxbw 8(%rdi), %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_16i8_to_16i16:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbw 8(%rdi), %xmm0
-; AVX1-NEXT: vpmovsxbw (%rdi), %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_16i8_to_16i16:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxbw (%rdi), %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_16i8_to_16i16:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxbw (%rdi), %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_16i8_to_16i16:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movdqa (%eax), %xmm1
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X32-SSE2-NEXT: psraw $8, %xmm0
-; X32-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X32-SSE2-NEXT: psraw $8, %xmm1
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_16i8_to_16i16:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxbw (%eax), %xmm0
-; X32-SSE41-NEXT: pmovsxbw 8(%eax), %xmm1
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <16 x i8>, <16 x i8>* %ptr
- %Y = sext <16 x i8> %X to <16 x i16>
- ret <16 x i16> %Y
-}
-
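Note on the SSE2/SSSE3 pattern above: it is the interleave form of sign_extend_inreg. punpcklbw/punpckhbw move each source byte into the high byte of a 16-bit lane (the low byte is left as garbage) and psraw $8 shifts the sign back down, discarding the garbage. The same idiom written directly in IR (hypothetical name), conceptually:

define <8 x i16> @sext_inreg_i8(<8 x i16> %w) {
  %shl = shl <8 x i16> %w, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %sra = ashr <8 x i16> %shl, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  ret <8 x i16> %sra
}
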
-define <2 x i64> @load_sext_2i16_to_2i64(<2 x i16> *%ptr) {
-; SSE2-LABEL: load_sext_2i16_to_2i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_2i16_to_2i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
-; SSSE3-NEXT: psrad $16, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_2i16_to_2i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: load_sext_2i16_to_2i64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxwq (%rdi), %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_2i16_to_2i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
-; X32-SSE2-NEXT: pxor %xmm1, %xmm1
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; X32-SSE2-NEXT: psrad $16, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_2i16_to_2i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxwq (%eax), %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <2 x i16>, <2 x i16>* %ptr
- %Y = sext <2 x i16> %X to <2 x i64>
- ret <2 x i64> %Y
-}
-
-define <4 x i32> @load_sext_4i16_to_4i32(<4 x i16> *%ptr) {
-; SSE2-LABEL: load_sext_4i16_to_4i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_4i16_to_4i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: psrad $16, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_4i16_to_4i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: load_sext_4i16_to_4i32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_4i16_to_4i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: psrad $16, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_4i16_to_4i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxwd (%eax), %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <4 x i16>, <4 x i16>* %ptr
- %Y = sext <4 x i16> %X to <4 x i32>
- ret <4 x i32> %Y
-}
-
-define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
-; SSE2-LABEL: load_sext_4i16_to_4i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_4i16_to_4i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: psrad $16, %xmm1
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_4i16_to_4i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
-; SSE41-NEXT: pmovsxwq 4(%rdi), %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_4i16_to_4i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxwq 4(%rdi), %xmm0
-; AVX1-NEXT: vpmovsxwq (%rdi), %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_4i16_to_4i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxwq (%rdi), %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_4i16_to_4i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxwq (%rdi), %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_4i16_to_4i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $16, %xmm1
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_4i16_to_4i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxwq (%eax), %xmm0
-; X32-SSE41-NEXT: pmovsxwq 4(%eax), %xmm1
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <4 x i16>, <4 x i16>* %ptr
- %Y = sext <4 x i16> %X to <4 x i64>
- ret <4 x i64> %Y
-}
-
-define <8 x i32> @load_sext_8i16_to_8i32(<8 x i16> *%ptr) {
-; SSE2-LABEL: load_sext_8i16_to_8i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_8i16_to_8i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movdqa (%rdi), %xmm1
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: psrad $16, %xmm0
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: psrad $16, %xmm1
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_8i16_to_8i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
-; SSE41-NEXT: pmovsxwd 8(%rdi), %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_8i16_to_8i32:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxwd 8(%rdi), %xmm0
-; AVX1-NEXT: vpmovsxwd (%rdi), %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_8i16_to_8i32:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxwd (%rdi), %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_8i16_to_8i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxwd (%rdi), %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_8i16_to_8i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movdqa (%eax), %xmm1
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X32-SSE2-NEXT: psrad $16, %xmm0
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: psrad $16, %xmm1
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_8i16_to_8i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxwd (%eax), %xmm0
-; X32-SSE41-NEXT: pmovsxwd 8(%eax), %xmm1
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <8 x i16>, <8 x i16>* %ptr
- %Y = sext <8 x i16> %X to <8 x i32>
- ret <8 x i32> %Y
-}
-
-define <2 x i64> @load_sext_2i32_to_2i64(<2 x i32> *%ptr) {
-; SSE2-LABEL: load_sext_2i32_to_2i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_2i32_to_2i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_2i32_to_2i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: load_sext_2i32_to_2i64:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxdq (%rdi), %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_2i32_to_2i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE2-NEXT: pxor %xmm1, %xmm1
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm1
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_2i32_to_2i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxdq (%eax), %xmm0
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <2 x i32>, <2 x i32>* %ptr
- %Y = sext <2 x i32> %X to <2 x i64>
- ret <2 x i64> %Y
-}
-
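Note on the function above: before SSE4.1 there is no pmovsxdq, so the i32 to i64 extension is assembled from parts. pcmpgtd with zero computes (0 > x), the all-ones sign word, and punpckldq interleaves each value with its sign word. Per scalar lane the computation is equivalent to this model (hypothetical name):

define i64 @sext_i32_model(i32 %x) {
  %lo  = zext i32 %x to i64
  %neg = icmp slt i32 %x, 0
  %hi  = select i1 %neg, i64 -4294967296, i64 0   ; all-ones in bits 63:32
  %r   = or i64 %lo, %hi
  ret i64 %r
}
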
-define <4 x i64> @load_sext_4i32_to_4i64(<4 x i32> *%ptr) {
-; SSE2-LABEL: load_sext_4i32_to_4i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movdqa (%rdi), %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: load_sext_4i32_to_4i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movdqa (%rdi), %xmm0
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pxor %xmm3, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: load_sext_4i32_to_4i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
-; SSE41-NEXT: pmovsxdq 8(%rdi), %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: load_sext_4i32_to_4i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxdq 8(%rdi), %xmm0
-; AVX1-NEXT: vpmovsxdq (%rdi), %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_sext_4i32_to_4i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vpmovsxdq (%rdi), %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: load_sext_4i32_to_4i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovsxdq (%rdi), %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: load_sext_4i32_to_4i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movdqa (%eax), %xmm0
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pxor %xmm3, %xmm3
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: load_sext_4i32_to_4i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: pmovsxdq (%eax), %xmm0
-; X32-SSE41-NEXT: pmovsxdq 8(%eax), %xmm1
-; X32-SSE41-NEXT: retl
-entry:
- %X = load <4 x i32>, <4 x i32>* %ptr
- %Y = sext <4 x i32> %X to <4 x i64>
- ret <4 x i64> %Y
-}
-
-define i32 @sext_2i8_to_i32(<16 x i8> %A) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_2i8_to_i32:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_2i8_to_i32:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: psraw $8, %xmm0
-; SSSE3-NEXT: movd %xmm0, %eax
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_2i8_to_i32:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: sext_2i8_to_i32:
-; AVX: # %bb.0: # %entry
-; AVX-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_2i8_to_i32:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: psraw $8, %xmm0
-; X32-SSE2-NEXT: movd %xmm0, %eax
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_2i8_to_i32:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm0
-; X32-SSE41-NEXT: movd %xmm0, %eax
-; X32-SSE41-NEXT: retl
-entry:
- %Shuf = shufflevector <16 x i8> %A, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
- %Ex = sext <2 x i8> %Shuf to <2 x i16>
- %Bc = bitcast <2 x i16> %Ex to i32
- ret i32 %Bc
-}
-
-define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
-; SSE2-LABEL: sext_4i1_to_4i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslld $31, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_4i1_to_4i64:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: pslld $31, %xmm0
-; SSSE3-NEXT: psrad $31, %xmm0
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pxor %xmm3, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_4i1_to_4i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pslld $31, %xmm0
-; SSE41-NEXT: psrad $31, %xmm0
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxdq %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_4i1_to_4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_4i1_to_4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_4i1_to_4i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX512-NEXT: vpsrad $31, %xmm0, %xmm0
-; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_4i1_to_4i64:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pslld $31, %xmm0
-; X32-SSE2-NEXT: psrad $31, %xmm0
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pxor %xmm3, %xmm3
-; X32-SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_4i1_to_4i64:
-; X32-SSE41: # %bb.0:
-; X32-SSE41-NEXT: pslld $31, %xmm0
-; X32-SSE41-NEXT: psrad $31, %xmm0
-; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm1
-; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE41-NEXT: retl
- %extmask = sext <4 x i1> %mask to <4 x i64>
- ret <4 x i64> %extmask
-}
-
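Note on the function above: pslld $31 / psrad $31 is the i1 flavor of the sign-extend-in-register idiom. The mask bit is moved into the sign position and arithmetic-shifted back, turning 1 into -1 and 0 into 0 in each 32-bit lane; equivalently (hypothetical name):

define <4 x i32> @sext_inreg_i1(<4 x i32> %v) {
  %shl = shl <4 x i32> %v, <i32 31, i32 31, i32 31, i32 31>
  %sra = ashr <4 x i32> %shl, <i32 31, i32 31, i32 31, i32 31>
  ret <4 x i32> %sra
}

The remaining instructions are the same i32 to i64 widening shown earlier.
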
-define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
-; SSE2-LABEL: sext_4i8_to_4i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $24, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_4i8_to_4i64:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSSE3-NEXT: psrad $24, %xmm1
-; SSSE3-NEXT: pxor %xmm2, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_4i8_to_4i64:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovsxbq %xmm0, %xmm2
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pmovsxbq %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_4i8_to_4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovsxbq %xmm0, %xmm1
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxbq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_4i8_to_4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxbq %xmm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_4i8_to_4i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxbq %xmm0, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_4i8_to_4i64:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X32-SSE2-NEXT: psrad $24, %xmm1
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: pcmpgtd %xmm1, %xmm2
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_4i8_to_4i64:
-; X32-SSE41: # %bb.0:
-; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm2
-; X32-SSE41-NEXT: psrld $16, %xmm0
-; X32-SSE41-NEXT: pmovsxbq %xmm0, %xmm1
-; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
-; X32-SSE41-NEXT: retl
- %extmask = sext <4 x i8> %mask to <4 x i64>
- ret <4 x i64> %extmask
-}
-
-define <32 x i8> @sext_32xi1_to_32xi8(<32 x i16> %c1, <32 x i16> %c2) nounwind {
-; SSE-LABEL: sext_32xi1_to_32xi8:
-; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqw %xmm5, %xmm1
-; SSE-NEXT: pcmpeqw %xmm4, %xmm0
-; SSE-NEXT: packsswb %xmm1, %xmm0
-; SSE-NEXT: pcmpeqw %xmm7, %xmm3
-; SSE-NEXT: pcmpeqw %xmm6, %xmm2
-; SSE-NEXT: packsswb %xmm3, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: sext_32xi1_to_32xi8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpcmpeqw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpcmpeqw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpcmpeqw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_32xi1_to_32xi8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: sext_32xi1_to_32xi8:
-; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512F-NEXT: vpcmpeqw %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512F-NEXT: retq
-;
-; AVX512BW-LABEL: sext_32xi1_to_32xi8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
-; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512BW-NEXT: retq
-;
-; X32-SSE-LABEL: sext_32xi1_to_32xi8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pushl %ebp
-; X32-SSE-NEXT: movl %esp, %ebp
-; X32-SSE-NEXT: andl $-16, %esp
-; X32-SSE-NEXT: subl $16, %esp
-; X32-SSE-NEXT: movdqa 8(%ebp), %xmm3
-; X32-SSE-NEXT: pcmpeqw 40(%ebp), %xmm1
-; X32-SSE-NEXT: pcmpeqw 24(%ebp), %xmm0
-; X32-SSE-NEXT: packsswb %xmm1, %xmm0
-; X32-SSE-NEXT: pcmpeqw 72(%ebp), %xmm3
-; X32-SSE-NEXT: pcmpeqw 56(%ebp), %xmm2
-; X32-SSE-NEXT: packsswb %xmm3, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm1
-; X32-SSE-NEXT: movl %ebp, %esp
-; X32-SSE-NEXT: popl %ebp
-; X32-SSE-NEXT: retl
- %a = icmp eq <32 x i16> %c1, %c2
- %b = sext <32 x i1> %a to <32 x i8>
- ret <32 x i8> %b
-}
-
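Note on the SSE path above: it works because pcmpeqw produces only 0 or -1 per word, and both values are fixed points of signed saturation, so packsswb doubles as a free mask truncation from i16 to i8. A standalone 16-lane sketch of the same pattern (hypothetical name):

define <16 x i8> @cmp_pack(<16 x i16> %x, <16 x i16> %y) {
  %c = icmp eq <16 x i16> %x, %y
  %s = sext <16 x i1> %c to <16 x i8>
  ret <16 x i8> %s
}

The AVX2 variant needs the trailing vpermq because vpacksswb packs within 128-bit lanes rather than across the full register.
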
-define <2 x i32> @sext_2i8_to_2i32(<2 x i8>* %addr) {
-; SSE2-LABEL: sext_2i8_to_2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movzwl (%rdi), %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: psrad $24, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm0
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_2i8_to_2i32:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movzwl (%rdi), %eax
-; SSSE3-NEXT: movd %eax, %xmm0
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: psrad $24, %xmm0
-; SSSE3-NEXT: paddd %xmm0, %xmm0
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_2i8_to_2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movzwl (%rdi), %eax
-; SSE41-NEXT: movd %eax, %xmm0
-; SSE41-NEXT: pmovsxbd %xmm0, %xmm0
-; SSE41-NEXT: paddd %xmm0, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: sext_2i8_to_2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: movzwl (%rdi), %eax
-; AVX-NEXT: vmovd %eax, %xmm0
-; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
-; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_2i8_to_2i32:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movzwl (%eax), %eax
-; X32-SSE2-NEXT: movd %eax, %xmm0
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: psrad $24, %xmm0
-; X32-SSE2-NEXT: paddd %xmm0, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_2i8_to_2i32:
-; X32-SSE41: # %bb.0:
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movzwl (%eax), %eax
-; X32-SSE41-NEXT: movd %eax, %xmm0
-; X32-SSE41-NEXT: pmovsxbd %xmm0, %xmm0
-; X32-SSE41-NEXT: paddd %xmm0, %xmm0
-; X32-SSE41-NEXT: retl
- %x = load <2 x i8>, <2 x i8>* %addr, align 1
- %y = sext <2 x i8> %x to <2 x i32>
- %z = add <2 x i32>%y, %y
- ret <2 x i32>%z
-}
-
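Note on the function above: movzwl is the important detail. Under widening legalization (as in these RUN lines) the <2 x i8> load is emitted as a single 16-bit scalar load, so exactly two bytes are read rather than a wider vector load past the end of the object. A model of just the load in IR (hypothetical name):

define <2 x i8> @load_2i8_model(i8* %p) {
  %pw = bitcast i8* %p to i16*
  %w  = load i16, i16* %pw, align 1
  %v  = bitcast i16 %w to <2 x i8>
  ret <2 x i8> %v
}
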
-define <4 x i32> @sext_4i17_to_4i32(<4 x i17>* %ptr) {
-; SSE2-LABEL: sext_4i17_to_4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movq (%rdi), %rax
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shlq $30, %rcx
-; SSE2-NEXT: sarq $47, %rcx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: shlq $47, %rcx
-; SSE2-NEXT: sarq $47, %rcx
-; SSE2-NEXT: movd %ecx, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movl 8(%rdi), %ecx
-; SSE2-NEXT: shll $13, %ecx
-; SSE2-NEXT: movq %rax, %rdx
-; SSE2-NEXT: shrq $51, %rdx
-; SSE2-NEXT: orl %ecx, %edx
-; SSE2-NEXT: shlq $47, %rdx
-; SSE2-NEXT: sarq $47, %rdx
-; SSE2-NEXT: movd %edx, %xmm1
-; SSE2-NEXT: shlq $13, %rax
-; SSE2-NEXT: sarq $47, %rax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_4i17_to_4i32:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: movq (%rdi), %rax
-; SSSE3-NEXT: movq %rax, %rcx
-; SSSE3-NEXT: shlq $30, %rcx
-; SSSE3-NEXT: sarq $47, %rcx
-; SSSE3-NEXT: movd %ecx, %xmm1
-; SSSE3-NEXT: movq %rax, %rcx
-; SSSE3-NEXT: shlq $47, %rcx
-; SSSE3-NEXT: sarq $47, %rcx
-; SSSE3-NEXT: movd %ecx, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: movl 8(%rdi), %ecx
-; SSSE3-NEXT: shll $13, %ecx
-; SSSE3-NEXT: movq %rax, %rdx
-; SSSE3-NEXT: shrq $51, %rdx
-; SSSE3-NEXT: orl %ecx, %edx
-; SSSE3-NEXT: shlq $47, %rdx
-; SSSE3-NEXT: sarq $47, %rdx
-; SSSE3-NEXT: movd %edx, %xmm1
-; SSSE3-NEXT: shlq $13, %rax
-; SSSE3-NEXT: sarq $47, %rax
-; SSSE3-NEXT: movd %eax, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_4i17_to_4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movq (%rdi), %rax
-; SSE41-NEXT: movq %rax, %rcx
-; SSE41-NEXT: shlq $30, %rcx
-; SSE41-NEXT: sarq $47, %rcx
-; SSE41-NEXT: movq %rax, %rdx
-; SSE41-NEXT: shlq $47, %rdx
-; SSE41-NEXT: sarq $47, %rdx
-; SSE41-NEXT: movd %edx, %xmm0
-; SSE41-NEXT: pinsrd $1, %ecx, %xmm0
-; SSE41-NEXT: movq %rax, %rcx
-; SSE41-NEXT: shlq $13, %rcx
-; SSE41-NEXT: sarq $47, %rcx
-; SSE41-NEXT: pinsrd $2, %ecx, %xmm0
-; SSE41-NEXT: movl 8(%rdi), %ecx
-; SSE41-NEXT: shll $13, %ecx
-; SSE41-NEXT: shrq $51, %rax
-; SSE41-NEXT: orl %ecx, %eax
-; SSE41-NEXT: shlq $47, %rax
-; SSE41-NEXT: sarq $47, %rax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: sext_4i17_to_4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: movq %rax, %rcx
-; AVX-NEXT: shlq $30, %rcx
-; AVX-NEXT: sarq $47, %rcx
-; AVX-NEXT: movq %rax, %rdx
-; AVX-NEXT: shlq $47, %rdx
-; AVX-NEXT: sarq $47, %rdx
-; AVX-NEXT: vmovd %edx, %xmm0
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
-; AVX-NEXT: movq %rax, %rcx
-; AVX-NEXT: shlq $13, %rcx
-; AVX-NEXT: sarq $47, %rcx
-; AVX-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
-; AVX-NEXT: movl 8(%rdi), %ecx
-; AVX-NEXT: shll $13, %ecx
-; AVX-NEXT: shrq $51, %rax
-; AVX-NEXT: orl %ecx, %eax
-; AVX-NEXT: shlq $47, %rax
-; AVX-NEXT: sarq $47, %rax
-; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_4i17_to_4i32:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl (%eax), %ecx
-; X32-SSE2-NEXT: movl 4(%eax), %edx
-; X32-SSE2-NEXT: movl 8(%eax), %eax
-; X32-SSE2-NEXT: shldl $13, %edx, %eax
-; X32-SSE2-NEXT: shll $15, %eax
-; X32-SSE2-NEXT: movd %eax, %xmm0
-; X32-SSE2-NEXT: movl %edx, %eax
-; X32-SSE2-NEXT: shll $13, %eax
-; X32-SSE2-NEXT: movd %eax, %xmm1
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT: shldl $15, %ecx, %edx
-; X32-SSE2-NEXT: shll $15, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: shll $15, %edx
-; X32-SSE2-NEXT: movd %edx, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT: psrad $15, %xmm0
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_4i17_to_4i32:
-; X32-SSE41: # %bb.0:
-; X32-SSE41-NEXT: pushl %esi
-; X32-SSE41-NEXT: .cfi_def_cfa_offset 8
-; X32-SSE41-NEXT: .cfi_offset %esi, -8
-; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE41-NEXT: movl (%eax), %ecx
-; X32-SSE41-NEXT: movl 4(%eax), %edx
-; X32-SSE41-NEXT: movl %edx, %esi
-; X32-SSE41-NEXT: movl 8(%eax), %eax
-; X32-SSE41-NEXT: shldl $13, %edx, %eax
-; X32-SSE41-NEXT: shldl $15, %ecx, %edx
-; X32-SSE41-NEXT: shll $15, %edx
-; X32-SSE41-NEXT: shll $15, %ecx
-; X32-SSE41-NEXT: movd %ecx, %xmm0
-; X32-SSE41-NEXT: pinsrd $1, %edx, %xmm0
-; X32-SSE41-NEXT: shll $13, %esi
-; X32-SSE41-NEXT: pinsrd $2, %esi, %xmm0
-; X32-SSE41-NEXT: shll $15, %eax
-; X32-SSE41-NEXT: pinsrd $3, %eax, %xmm0
-; X32-SSE41-NEXT: psrad $15, %xmm0
-; X32-SSE41-NEXT: popl %esi
-; X32-SSE41-NEXT: .cfi_def_cfa_offset 4
-; X32-SSE41-NEXT: retl
- %a = load <4 x i17>, <4 x i17>* %ptr
- %b = sext <4 x i17> %a to <4 x i32>
- ret <4 x i32> %b
-}
-
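Note on the function above: each 17-bit element is extended with the classic bitfield trick of shifting the field to the top of a 64-bit register and arithmetic-shifting back down by 64 - 17 = 47. For a field whose low bit is at position k (k = 0, 17, 34) the scalar computation is (hypothetical helper; the fourth element straddles the 64-bit and 32-bit loads and is first reassembled with shll/shrq/orl):

define i32 @sext_i17_field(i64 %bits, i64 %k) {
  %up = sub i64 47, %k            ; place bit k+16 at bit 63
  %hi = shl i64 %bits, %up
  %sx = ashr i64 %hi, 47          ; field low bit lands at bit 0, sign-filled
  %r  = trunc i64 %sx to i32
  ret i32 %r
}

This matches the shlq $47 / $30 / $13 amounts in the checks above.
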
-define <8 x i64> @sext_8i6_to_8i64(i32 %x) nounwind uwtable readnone ssp {
-; SSE2-LABEL: sext_8i6_to_8i64:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movd %edi, %xmm0
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
-; SSE2-NEXT: paddw {{.*}}(%rip), %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
-; SSE2-NEXT: psllq $58, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: psrad $26, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7]
-; SSE2-NEXT: psllq $58, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrad $31, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSE2-NEXT: psrad $26, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
-; SSE2-NEXT: psllq $58, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; SSE2-NEXT: psrad $26, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
-; SSE2-NEXT: psllq $58, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; SSE2-NEXT: psrad $26, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: sext_8i6_to_8i64:
-; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movd %edi, %xmm0
-; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
-; SSSE3-NEXT: paddw {{.*}}(%rip), %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
-; SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
-; SSSE3-NEXT: psllq $58, %xmm0
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: psrad $31, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSSE3-NEXT: psrad $26, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3]
-; SSSE3-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7]
-; SSSE3-NEXT: psllq $58, %xmm1
-; SSSE3-NEXT: movdqa %xmm1, %xmm2
-; SSSE3-NEXT: psrad $31, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSSE3-NEXT: psrad $26, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3]
-; SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
-; SSSE3-NEXT: psllq $58, %xmm2
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
-; SSSE3-NEXT: psrad $31, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; SSSE3-NEXT: psrad $26, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3]
-; SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
-; SSSE3-NEXT: psllq $58, %xmm3
-; SSSE3-NEXT: movdqa %xmm3, %xmm4
-; SSSE3-NEXT: psrad $31, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; SSSE3-NEXT: psrad $26, %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: sext_8i6_to_8i64:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: movd %edi, %xmm0
-; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
-; SSE41-NEXT: paddw {{.*}}(%rip), %xmm3
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; SSE41-NEXT: psllq $58, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrad $31, %xmm1
-; SSE41-NEXT: psrad $26, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,2,3]
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; SSE41-NEXT: psllq $58, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrad $31, %xmm2
-; SSE41-NEXT: psrad $26, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
-; SSE41-NEXT: psllq $58, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psrad $31, %xmm4
-; SSE41-NEXT: psrad $26, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; SSE41-NEXT: psllq $58, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psrad $31, %xmm4
-; SSE41-NEXT: psrad $26, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: sext_8i6_to_8i64:
-; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovd %edi, %xmm0
-; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $10, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $10, %xmm0, %xmm1
-; AVX1-NEXT: vpmovsxwq %xmm1, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
-; AVX1-NEXT: vpmovsxwq %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpmovsxwq %xmm2, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,3,0,1]
-; AVX1-NEXT: vpmovsxwq %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: sext_8i6_to_8i64:
-; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vmovd %edi, %xmm0
-; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
-; AVX2-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpsllw $10, %xmm0, %xmm0
-; AVX2-NEXT: vpsraw $10, %xmm0, %xmm1
-; AVX2-NEXT: vpmovsxwq %xmm1, %ymm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX2-NEXT: vpmovsxwq %xmm1, %ymm1
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: sext_8i6_to_8i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vmovd %edi, %xmm0
-; AVX512-NEXT: vpbroadcastw %xmm0, %xmm0
-; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512-NEXT: vpsllq $58, %zmm0, %zmm0
-; AVX512-NEXT: vpsraq $58, %zmm0, %zmm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: sext_8i6_to_8i64:
-; X32-SSE2: # %bb.0: # %entry
-; X32-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
-; X32-SSE2-NEXT: paddw {{\.LCPI.*}}, %xmm3
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,1,0,3]
-; X32-SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
-; X32-SSE2-NEXT: psllq $58, %xmm0
-; X32-SSE2-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE2-NEXT: psrad $31, %xmm1
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X32-SSE2-NEXT: psrad $26, %xmm0
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,3]
-; X32-SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,6,7]
-; X32-SSE2-NEXT: psllq $58, %xmm1
-; X32-SSE2-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE2-NEXT: psrad $31, %xmm2
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; X32-SSE2-NEXT: psrad $26, %xmm1
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,2,3]
-; X32-SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
-; X32-SSE2-NEXT: psllq $58, %xmm2
-; X32-SSE2-NEXT: movdqa %xmm2, %xmm4
-; X32-SSE2-NEXT: psrad $31, %xmm4
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; X32-SSE2-NEXT: psrad $26, %xmm2
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,3,3]
-; X32-SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,7]
-; X32-SSE2-NEXT: psllq $58, %xmm3
-; X32-SSE2-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE2-NEXT: psrad $31, %xmm4
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; X32-SSE2-NEXT: psrad $26, %xmm3
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: sext_8i6_to_8i64:
-; X32-SSE41: # %bb.0: # %entry
-; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
-; X32-SSE41-NEXT: paddw {{\.LCPI.*}}, %xmm3
-; X32-SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; X32-SSE41-NEXT: psllq $58, %xmm0
-; X32-SSE41-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE41-NEXT: psrad $31, %xmm1
-; X32-SSE41-NEXT: psrad $26, %xmm0
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,2,3]
-; X32-SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; X32-SSE41-NEXT: psllq $58, %xmm1
-; X32-SSE41-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE41-NEXT: psrad $31, %xmm2
-; X32-SSE41-NEXT: psrad $26, %xmm1
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X32-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
-; X32-SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
-; X32-SSE41-NEXT: psllq $58, %xmm2
-; X32-SSE41-NEXT: movdqa %xmm2, %xmm4
-; X32-SSE41-NEXT: psrad $31, %xmm4
-; X32-SSE41-NEXT: psrad $26, %xmm2
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; X32-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
-; X32-SSE41-NEXT: pmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; X32-SSE41-NEXT: psllq $58, %xmm3
-; X32-SSE41-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE41-NEXT: psrad $31, %xmm4
-; X32-SSE41-NEXT: psrad $26, %xmm3
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; X32-SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
-; X32-SSE41-NEXT: retl
-entry:
- %a = trunc i32 %x to i6
- %b = insertelement <8 x i6> undef, i6 %a, i32 0
- %c = shufflevector <8 x i6> %b, <8 x i6> undef, <8 x i32> zeroinitializer
- %d = add <8 x i6> %c, <i6 0, i6 1, i6 2, i6 3, i6 4, i6 5, i6 6, i6 7>
- %e = sext <8 x i6> %d to <8 x i64>
- ret <8 x i64> %e
-}
-
-define <8 x i32> @zext_negate_sext(<8 x i8> %x) {
-; SSE2-LABEL: zext_negate_sext:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: psubw %xmm0, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: zext_negate_sext:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSSE3-NEXT: psubw %xmm0, %xmm1
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: psrad $16, %xmm0
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: psrad $16, %xmm1
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: zext_negate_sext:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: psubw %xmm0, %xmm1
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: zext_negate_sext:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: zext_negate_sext:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: zext_negate_sext:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpsubd %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: zext_negate_sext:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pxor %xmm1, %xmm1
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X32-SSE2-NEXT: psubw %xmm0, %xmm1
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X32-SSE2-NEXT: psrad $16, %xmm0
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: psrad $16, %xmm1
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: zext_negate_sext:
-; X32-SSE41: # %bb.0:
-; X32-SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X32-SSE41-NEXT: pxor %xmm1, %xmm1
-; X32-SSE41-NEXT: psubw %xmm0, %xmm1
-; X32-SSE41-NEXT: pmovsxwd %xmm1, %xmm0
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxwd %xmm1, %xmm1
-; X32-SSE41-NEXT: retl
- %z = zext <8 x i8> %x to <8 x i16>
- %neg = sub nsw <8 x i16> zeroinitializer, %z
- %r = sext <8 x i16> %neg to <8 x i32>
- ret <8 x i32> %r
-}
-
-define <8 x i32> @zext_decremenet_sext(<8 x i8> %x) {
-; SSE2-LABEL: zext_decremenet_sext:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: retq
-;
-; SSSE3-LABEL: zext_decremenet_sext:
-; SSSE3: # %bb.0:
-; SSSE3-NEXT: pxor %xmm1, %xmm1
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
-; SSSE3-NEXT: paddw %xmm0, %xmm1
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSSE3-NEXT: psrad $16, %xmm0
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: psrad $16, %xmm1
-; SSSE3-NEXT: retq
-;
-; SSE41-LABEL: zext_decremenet_sext:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm1
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: zext_decremenet_sext:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: zext_decremenet_sext:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: zext_decremenet_sext:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: retq
-;
-; X32-SSE2-LABEL: zext_decremenet_sext:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pxor %xmm1, %xmm1
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X32-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; X32-SSE2-NEXT: paddw %xmm0, %xmm1
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X32-SSE2-NEXT: psrad $16, %xmm0
-; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: psrad $16, %xmm1
-; X32-SSE2-NEXT: retl
-;
-; X32-SSE41-LABEL: zext_decremenet_sext:
-; X32-SSE41: # %bb.0:
-; X32-SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X32-SSE41-NEXT: pcmpeqd %xmm1, %xmm1
-; X32-SSE41-NEXT: paddw %xmm0, %xmm1
-; X32-SSE41-NEXT: pmovsxwd %xmm1, %xmm0
-; X32-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X32-SSE41-NEXT: pmovsxwd %xmm1, %xmm1
-; X32-SSE41-NEXT: retl
- %z = zext <8 x i8> %x to <8 x i16>
- %dec = add <8 x i16> %z, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
- %r = sext <8 x i16> %dec to <8 x i32>
- ret <8 x i32> %r
-}
Removed: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-sub128-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-sub128-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-sub128-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-sub128-widen.ll (removed)
@@ -1,2481 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512DQVL
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512BWVL
-;
-; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
-
-;
-; Variable Shifts
-;
-
-define <2 x i32> @var_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind {
-; SSE2-LABEL: var_shift_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrad %xmm2, %xmm3
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrad %xmm4, %xmm2
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psrad %xmm3, %xmm4
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; SSE2-NEXT: psrad %xmm1, %xmm0
-; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
-; SSE2-NEXT: movaps %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrad %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: psrad %xmm4, %xmm5
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrad %xmm1, %xmm3
-; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
-; SSE41-NEXT: psrad %xmm1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: var_shift_v2i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: var_shift_v2i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOPAVX1-LABEL: var_shift_v2i32:
-; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
-; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: var_shift_v2i32:
-; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
-; XOPAVX2-NEXT: retq
-;
-; AVX512-LABEL: var_shift_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: var_shift_v2i32:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v2i32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm3
-; X32-SSE-NEXT: psrad %xmm2, %xmm3
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: psrad %xmm4, %xmm2
-; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm4
-; X32-SSE-NEXT: psrad %xmm3, %xmm4
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; X32-SSE-NEXT: psrad %xmm1, %xmm0
-; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
-; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
-; X32-SSE-NEXT: movaps %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <2 x i32> %a, %b
- ret <2 x i32> %shift
-}
-
-define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind {
-; SSE2-LABEL: var_shift_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: psllw $12, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psraw $4, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psraw $2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: psraw $15, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: psraw $1, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psllw $12, %xmm0
-; SSE41-NEXT: psllw $4, %xmm2
-; SSE41-NEXT: por %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddw %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: psraw $8, %xmm4
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $2, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $1, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: var_shift_v4i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: var_shift_v4i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: var_shift_v4i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v4i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v4i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v4i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512DQVL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v4i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpsravw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v4i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psllw $12, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psraw $8, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psraw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psraw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: psraw $15, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: psraw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <4 x i16> %a, %b
- ret <4 x i16> %shift
-}
-
-define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind {
-; SSE2-LABEL: var_shift_v2i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: psllw $12, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psraw $4, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psraw $2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: psraw $15, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: psraw $1, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v2i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psllw $12, %xmm0
-; SSE41-NEXT: psllw $4, %xmm2
-; SSE41-NEXT: por %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddw %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: psraw $8, %xmm4
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $2, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $1, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: var_shift_v2i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: var_shift_v2i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: var_shift_v2i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v2i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v2i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v2i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512DQVL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v2i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpsravw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v2i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psllw $12, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psraw $8, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psraw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psraw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: psraw $15, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: psraw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <2 x i16> %a, %b
- ret <2 x i16> %shift
-}
-
-define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
-; SSE2-LABEL: var_shift_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: psllw $5, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: psraw $4, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: paddw %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: psraw $2, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: paddw %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: pandn %xmm2, %xmm4
-; SSE2-NEXT: psraw $1, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: psraw $4, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: psraw $2, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtw %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: psraw $1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $4, %xmm4
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $2, %xmm4
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $1, %xmm4
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $2, %xmm2
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $1, %xmm2
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: var_shift_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: var_shift_v8i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v8i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v8i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v8i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v8i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v8i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; X32-SSE-NEXT: psllw $5, %xmm1
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm2, %xmm6
-; X32-SSE-NEXT: psraw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm6, %xmm2
-; X32-SSE-NEXT: paddw %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm2, %xmm6
-; X32-SSE-NEXT: psraw $2, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm6, %xmm2
-; X32-SSE-NEXT: paddw %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm4
-; X32-SSE-NEXT: pandn %xmm2, %xmm4
-; X32-SSE-NEXT: psraw $1, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm4, %xmm2
-; X32-SSE-NEXT: psrlw $8, %xmm2
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
-; X32-SSE-NEXT: psraw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
-; X32-SSE-NEXT: psraw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm1
-; X32-SSE-NEXT: pandn %xmm0, %xmm1
-; X32-SSE-NEXT: psraw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <8 x i8> %a, %b
- ret <8 x i8> %shift
-}
-
-define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
-; SSE2-LABEL: var_shift_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: psllw $5, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: psraw $4, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: paddw %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: psraw $2, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: paddw %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: pandn %xmm2, %xmm4
-; SSE2-NEXT: psraw $1, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: psraw $4, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: psraw $2, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtw %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: psraw $1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $4, %xmm4
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $2, %xmm4
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $1, %xmm4
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $2, %xmm2
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $1, %xmm2
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: var_shift_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: var_shift_v4i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v4i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v4i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v4i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v4i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v4i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; X32-SSE-NEXT: psllw $5, %xmm1
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm2, %xmm6
-; X32-SSE-NEXT: psraw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm6, %xmm2
-; X32-SSE-NEXT: paddw %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm2, %xmm6
-; X32-SSE-NEXT: psraw $2, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm6, %xmm2
-; X32-SSE-NEXT: paddw %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm4
-; X32-SSE-NEXT: pandn %xmm2, %xmm4
-; X32-SSE-NEXT: psraw $1, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm4, %xmm2
-; X32-SSE-NEXT: psrlw $8, %xmm2
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
-; X32-SSE-NEXT: psraw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
-; X32-SSE-NEXT: psraw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm1
-; X32-SSE-NEXT: pandn %xmm0, %xmm1
-; X32-SSE-NEXT: psraw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <4 x i8> %a, %b
- ret <4 x i8> %shift
-}
-
-define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
-; SSE2-LABEL: var_shift_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: psllw $5, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: psraw $4, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: paddw %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: psraw $2, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: paddw %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: pandn %xmm2, %xmm4
-; SSE2-NEXT: psraw $1, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: psraw $4, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: psraw $2, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtw %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: psraw $1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $4, %xmm4
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $2, %xmm4
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $1, %xmm4
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $2, %xmm2
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $1, %xmm2
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: var_shift_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
-; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: var_shift_v2i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v2i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v2i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v2i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v2i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v2i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; X32-SSE-NEXT: psllw $5, %xmm1
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm2, %xmm6
-; X32-SSE-NEXT: psraw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm6, %xmm2
-; X32-SSE-NEXT: paddw %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm2, %xmm6
-; X32-SSE-NEXT: psraw $2, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm6, %xmm2
-; X32-SSE-NEXT: paddw %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm4
-; X32-SSE-NEXT: pandn %xmm2, %xmm4
-; X32-SSE-NEXT: psraw $1, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm4, %xmm2
-; X32-SSE-NEXT: psrlw $8, %xmm2
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
-; X32-SSE-NEXT: psraw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
-; X32-SSE-NEXT: psraw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm1
-; X32-SSE-NEXT: pandn %xmm0, %xmm1
-; X32-SSE-NEXT: psraw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <2 x i8> %a, %b
- ret <2 x i8> %shift
-}
-
-;
-; Uniform Variable Shifts
-;
-
-define <2 x i32> @splatvar_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
-; SSE2-NEXT: psrad %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: psrad %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: splatvar_shift_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatvar_shift_v2i32:
-; XOP: # %bb.0:
-; XOP-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; XOP-NEXT: vpsrad %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatvar_shift_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX512-NEXT: vpsrad %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatvar_shift_v2i32:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX512VL-NEXT: vpsrad %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v2i32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: xorps %xmm2, %xmm2
-; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
-; X32-SSE-NEXT: psrad %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
- %shift = ashr <2 x i32> %a, %splat
- ret <2 x i32> %shift
-}
-
-define <4 x i16> @splatvar_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psraw %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; SSE41-NEXT: psraw %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: splatvar_shift_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatvar_shift_v4i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatvar_shift_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512-NEXT: vpsraw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatvar_shift_v4i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512VL-NEXT: vpsraw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v4i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psraw %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
- %shift = ashr <4 x i16> %a, %splat
- ret <4 x i16> %shift
-}
-
-define <2 x i16> @splatvar_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v2i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psraw %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v2i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; SSE41-NEXT: psraw %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: splatvar_shift_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatvar_shift_v2i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatvar_shift_v2i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512-NEXT: vpsraw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatvar_shift_v2i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512VL-NEXT: vpsraw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v2i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psraw %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
- %shift = ashr <2 x i16> %a, %splat
- ret <2 x i16> %shift
-}
-
-define <8 x i8> @splatvar_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm1, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: psubb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: psrlw %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: psubb %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: splatvar_shift_v8i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splatvar_shift_v8i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2
-; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOPAVX1-LABEL: splatvar_shift_v8i8:
-; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; XOPAVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: splatvar_shift_v8i8:
-; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
-; XOPAVX2-NEXT: retq
-;
-; AVX512DQ-LABEL: splatvar_shift_v8i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: splatvar_shift_v8i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: splatvar_shift_v8i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: splatvar_shift_v8i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v8i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psrlw %xmm1, %xmm0
-; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; X32-SSE-NEXT: psrlw %xmm1, %xmm2
-; X32-SSE-NEXT: psrlw $8, %xmm2
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; X32-SSE-NEXT: psrlw %xmm1, %xmm2
-; X32-SSE-NEXT: pxor %xmm2, %xmm0
-; X32-SSE-NEXT: psubb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
- %shift = ashr <8 x i8> %a, %splat
- ret <8 x i8> %shift
-}
-
-define <4 x i8> @splatvar_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm1, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: psubb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: psrlw %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: psubb %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: splatvar_shift_v4i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splatvar_shift_v4i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2
-; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOPAVX1-LABEL: splatvar_shift_v4i8:
-; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; XOPAVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: splatvar_shift_v4i8:
-; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
-; XOPAVX2-NEXT: retq
-;
-; AVX512DQ-LABEL: splatvar_shift_v4i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: splatvar_shift_v4i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: splatvar_shift_v4i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: splatvar_shift_v4i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v4i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psrlw %xmm1, %xmm0
-; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; X32-SSE-NEXT: psrlw %xmm1, %xmm2
-; X32-SSE-NEXT: psrlw $8, %xmm2
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; X32-SSE-NEXT: psrlw %xmm1, %xmm2
-; X32-SSE-NEXT: pxor %xmm2, %xmm0
-; X32-SSE-NEXT: psubb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
- %shift = ashr <4 x i8> %a, %splat
- ret <4 x i8> %shift
-}
-
-define <2 x i8> @splatvar_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm1, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm2, %xmm0
-; SSE2-NEXT: psubb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: psrlw %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: pxor %xmm2, %xmm0
-; SSE41-NEXT: psubb %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: splatvar_shift_v2i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splatvar_shift_v2i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2
-; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: splatvar_shift_v2i8:
-; XOP: # %bb.0:
-; XOP-NEXT: insertq {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7,u,u,u,u,u,u,u,u]
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: splatvar_shift_v2i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: splatvar_shift_v2i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: splatvar_shift_v2i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: splatvar_shift_v2i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v2i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psrlw %xmm1, %xmm0
-; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; X32-SSE-NEXT: psrlw %xmm1, %xmm2
-; X32-SSE-NEXT: psrlw $8, %xmm2
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
-; X32-SSE-NEXT: psrlw %xmm1, %xmm2
-; X32-SSE-NEXT: pxor %xmm2, %xmm0
-; X32-SSE-NEXT: psubb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
- %shift = ashr <2 x i8> %a, %splat
- ret <2 x i8> %shift
-}
-
-;
-; Constant Shifts
-;
-
-define <2 x i32> @constant_shift_v2i32(<2 x i32> %a) nounwind {
-; SSE2-LABEL: constant_shift_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrad $4, %xmm1
-; SSE2-NEXT: psrad $5, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrad $5, %xmm1
-; SSE41-NEXT: psrad $4, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: constant_shift_v2i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsrad $5, %xmm0, %xmm1
-; AVX1-NEXT: vpsrad $4, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v2i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOPAVX1-LABEL: constant_shift_v2i32:
-; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: constant_shift_v2i32:
-; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX2-NEXT: retq
-;
-; AVX512-LABEL: constant_shift_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: constant_shift_v2i32:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v2i32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrad $4, %xmm1
-; X32-SSE-NEXT: psrad $5, %xmm0
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <2 x i32> %a, <i32 4, i32 5>
- ret <2 x i32> %shift
-}
-
-define <4 x i16> @constant_shift_v4i16(<4 x i16> %a) nounwind {
-; SSE2-LABEL: constant_shift_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psraw $2, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
-; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,65535,65535,65535]
-; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: andps %xmm2, %xmm0
-; SSE2-NEXT: psraw $1, %xmm1
-; SSE2-NEXT: andnps %xmm1, %xmm2
-; SSE2-NEXT: orps %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = <u,32768,16384,8192,u,u,u,u>
-; SSE41-NEXT: pmulhw %xmm0, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; SSE41-NEXT: psraw $1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7]
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: constant_shift_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
-; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7]
-; AVX-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v4i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v4i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v4i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = <0,1,2,3,u,u,u,u>
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v4i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v4i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v4i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psraw $2, %xmm1
-; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
-; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
-; X32-SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,65535,65535,65535]
-; X32-SSE-NEXT: movaps %xmm1, %xmm0
-; X32-SSE-NEXT: andps %xmm2, %xmm0
-; X32-SSE-NEXT: psraw $1, %xmm1
-; X32-SSE-NEXT: andnps %xmm1, %xmm2
-; X32-SSE-NEXT: orps %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <4 x i16> %a, <i16 0, i16 1, i16 2, i16 3>
- ret <4 x i16> %shift
-}
-
-define <2 x i16> @constant_shift_v2i16(<2 x i16> %a) nounwind {
-; SSE2-LABEL: constant_shift_v2i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psraw $3, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,65535,65535]
-; SSE2-NEXT: psraw $2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v2i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psraw $3, %xmm1
-; SSE41-NEXT: psraw $2, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: constant_shift_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsraw $3, %xmm0, %xmm1
-; AVX-NEXT: vpsraw $2, %xmm0, %xmm0
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; AVX-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v2i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v2i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpsraw $3, %xmm0, %xmm1
-; AVX512DQ-NEXT: vpsraw $2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v2i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = <2,3,u,u,u,u,u,u>
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v2i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpsraw $3, %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpsraw $2, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v2i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v2i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psraw $3, %xmm1
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,65535,65535]
-; X32-SSE-NEXT: psraw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pandn %xmm1, %xmm2
-; X32-SSE-NEXT: por %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <2 x i16> %a, <i16 2, i16 3>
- ret <2 x i16> %shift
-}
-
-define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind {
-; SSE-LABEL: constant_shift_v8i8:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: psraw $8, %xmm0
-; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: constant_shift_v8i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v8i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v8i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v8i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v8i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,0,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v8i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v8i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v8i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: psraw $8, %xmm0
-; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <8 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
- ret <8 x i8> %shift
-}
-
-define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind {
-; SSE-LABEL: constant_shift_v4i8:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: psraw $8, %xmm0
-; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: constant_shift_v4i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v4i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v4i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v4i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v4i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v4i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v4i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v4i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: psraw $8, %xmm0
-; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <4 x i8> %a, <i8 0, i8 1, i8 2, i8 3>
- ret <4 x i8> %shift
-}
-
-define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind {
-; SSE-LABEL: constant_shift_v2i8:
-; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE-NEXT: psraw $8, %xmm0
-; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: packuswb %xmm2, %xmm0
-; SSE-NEXT: retq
-;
-; AVX1-LABEL: constant_shift_v2i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v2i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v2i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v2i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v2i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v2i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v2i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v2i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: psraw $8, %xmm0
-; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <2 x i8> %a, <i8 2, i8 3>
- ret <2 x i8> %shift
-}
-
-;
-; Uniform Constant Shifts
-;
-
-define <2 x i32> @splatconstant_shift_v2i32(<2 x i32> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v2i32:
-; SSE: # %bb.0:
-; SSE-NEXT: psrad $5, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrad $5, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v2i32:
-; XOP: # %bb.0:
-; XOP-NEXT: vpsrad $5, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrad $5, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v2i32:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrad $5, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v2i32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrad $5, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <2 x i32> %a, <i32 5, i32 5>
- ret <2 x i32> %shift
-}
-
-define <4 x i16> @splatconstant_shift_v4i16(<4 x i16> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v4i16:
-; SSE: # %bb.0:
-; SSE-NEXT: psraw $3, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsraw $3, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v4i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpsraw $3, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsraw $3, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v4i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsraw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v4i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psraw $3, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
- ret <4 x i16> %shift
-}
-
-define <2 x i16> @splatconstant_shift_v2i16(<2 x i16> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v2i16:
-; SSE: # %bb.0:
-; SSE-NEXT: psraw $3, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsraw $3, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v2i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpsraw $3, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v2i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsraw $3, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v2i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsraw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v2i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psraw $3, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <2 x i16> %a, <i16 3, i16 3>
- ret <2 x i16> %shift
-}
-
-define <8 x i8> @splatconstant_shift_v8i8(<8 x i8> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v8i8:
-; SSE: # %bb.0:
-; SSE-NEXT: psrlw $3, %xmm0
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: psubb %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v8i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v8i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512VL-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v8i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrlw $3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; X32-SSE-NEXT: pxor %xmm1, %xmm0
-; X32-SSE-NEXT: psubb %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- ret <8 x i8> %shift
-}
-
-define <4 x i8> @splatconstant_shift_v4i8(<4 x i8> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v4i8:
-; SSE: # %bb.0:
-; SSE-NEXT: psrlw $3, %xmm0
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: psubb %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v4i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v4i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v4i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512VL-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v4i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrlw $3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; X32-SSE-NEXT: pxor %xmm1, %xmm0
-; X32-SSE-NEXT: psubb %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <4 x i8> %a, <i8 3, i8 3, i8 3, i8 3>
- ret <4 x i8> %shift
-}
-
-define <2 x i8> @splatconstant_shift_v2i8(<2 x i8> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v2i8:
-; SSE: # %bb.0:
-; SSE-NEXT: psrlw $3, %xmm0
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: psubb %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v2i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v2i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v2i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512VL-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v2i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrlw $3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; X32-SSE-NEXT: pxor %xmm1, %xmm0
-; X32-SSE-NEXT: psubb %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %shift = ashr <2 x i8> %a, <i8 3, i8 3>
- ret <2 x i8> %shift
-}

Removed: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-sub128-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-sub128-widen.ll?rev=368183&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-sub128-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-sub128-widen.ll (removed)
@@ -1,2151 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512DQVL
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512BWVL
-;
-; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
-; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
-
-;
-; Variable Shifts
-;
-
-define <2 x i32> @var_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind {
-; SSE2-LABEL: var_shift_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrld %xmm2, %xmm3
-; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrld %xmm4, %xmm2
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psrld %xmm3, %xmm4
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; SSE2-NEXT: psrld %xmm1, %xmm0
-; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
-; SSE2-NEXT: movaps %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrld %xmm2, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: psrld %xmm4, %xmm5
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrld %xmm1, %xmm3
-; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
-; SSE41-NEXT: psrld %xmm1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: var_shift_v2i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
-; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: var_shift_v2i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOPAVX1-LABEL: var_shift_v2i32:
-; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
-; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: var_shift_v2i32:
-; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
-; XOPAVX2-NEXT: retq
-;
-; AVX512-LABEL: var_shift_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: var_shift_v2i32:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v2i32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm3
-; X32-SSE-NEXT: psrld %xmm2, %xmm3
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: psrld %xmm4, %xmm2
-; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm4
-; X32-SSE-NEXT: psrld %xmm3, %xmm4
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; X32-SSE-NEXT: psrld %xmm1, %xmm0
-; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
-; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
-; X32-SSE-NEXT: movaps %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <2 x i32> %a, %b
- ret <2 x i32> %shift
-}
-
-define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind {
-; SSE2-LABEL: var_shift_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: psllw $12, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: psraw $15, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psllw $12, %xmm0
-; SSE41-NEXT: psllw $4, %xmm2
-; SSE41-NEXT: por %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddw %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: psrlw $8, %xmm4
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlw $4, %xmm2
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlw $2, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlw $1, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: var_shift_v4i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: var_shift_v4i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: var_shift_v4i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v4i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512DQ-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v4i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v4i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512DQVL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v4i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpsrlvw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v4i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psllw $12, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: psraw $15, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <4 x i16> %a, %b
- ret <4 x i16> %shift
-}
-
-define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind {
-; SSE2-LABEL: var_shift_v2i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: psllw $12, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psraw $15, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm1
-; SSE2-NEXT: psraw $15, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v2i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: psllw $12, %xmm0
-; SSE41-NEXT: psllw $4, %xmm2
-; SSE41-NEXT: por %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddw %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: psrlw $8, %xmm4
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlw $4, %xmm2
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlw $2, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlw $1, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: var_shift_v2i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: var_shift_v2i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: var_shift_v2i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v2i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512DQ-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v2i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v2i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512DQVL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v2i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpsrlvw %xmm1, %xmm0, %xmm0
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v2i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psllw $12, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: psraw $15, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: paddw %xmm1, %xmm1
-; X32-SSE-NEXT: psraw $15, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <2 x i16> %a, %b
- ret <2 x i16> %shift
-}
-
-define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
-; SSE2-LABEL: var_shift_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: psllw $5, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrlw $4, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrlw $2, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrlw $1, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: var_shift_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: var_shift_v8i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v8i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v8i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v8i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v8i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v8i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psllw $5, %xmm1
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm1
-; X32-SSE-NEXT: pandn %xmm0, %xmm1
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <8 x i8> %a, %b
- ret <8 x i8> %shift
-}
-
-define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
-; SSE2-LABEL: var_shift_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: psllw $5, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrlw $4, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrlw $2, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrlw $1, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: var_shift_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: var_shift_v4i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v4i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v4i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v4i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v4i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v4i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psllw $5, %xmm1
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm1
-; X32-SSE-NEXT: pandn %xmm0, %xmm1
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <4 x i8> %a, %b
- ret <4 x i8> %shift
-}
-
-define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
-; SSE2-LABEL: var_shift_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: psllw $5, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: var_shift_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrlw $4, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrlw $2, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrlw $1, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: var_shift_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: var_shift_v2i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: var_shift_v2i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: var_shift_v2i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: var_shift_v2i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: var_shift_v2i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: var_shift_v2i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psllw $5, %xmm1
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm1
-; X32-SSE-NEXT: pandn %xmm0, %xmm1
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <2 x i8> %a, %b
- ret <2 x i8> %shift
-}
-
-;
-; Uniform Variable Shifts
-;
-
-define <2 x i32> @splatvar_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
-; SSE2-NEXT: psrld %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: psrld %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: splatvar_shift_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatvar_shift_v2i32:
-; XOP: # %bb.0:
-; XOP-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; XOP-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatvar_shift_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX512-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatvar_shift_v2i32:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX512VL-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v2i32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: xorps %xmm2, %xmm2
-; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
-; X32-SSE-NEXT: psrld %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
- %shift = lshr <2 x i32> %a, %splat
- ret <2 x i32> %shift
-}
-
-define <4 x i16> @splatvar_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; SSE41-NEXT: psrlw %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: splatvar_shift_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatvar_shift_v4i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; XOP-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatvar_shift_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatvar_shift_v4i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512VL-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v4i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psrlw %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
- %shift = lshr <4 x i16> %a, %splat
- ret <4 x i16> %shift
-}
-
-define <2 x i16> @splatvar_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v2i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v2i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; SSE41-NEXT: psrlw %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: splatvar_shift_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatvar_shift_v2i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; XOP-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatvar_shift_v2i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatvar_shift_v2i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512VL-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v2i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psrlw %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
- %shift = lshr <2 x i16> %a, %splat
- ret <2 x i16> %shift
-}
-
-define <8 x i8> @splatvar_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm1, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: psrlw %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: splatvar_shift_v8i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splatvar_shift_v8i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOPAVX1-LABEL: splatvar_shift_v8i8:
-; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; XOPAVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: splatvar_shift_v8i8:
-; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
-; XOPAVX2-NEXT: retq
-;
-; AVX512DQ-LABEL: splatvar_shift_v8i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: splatvar_shift_v8i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: splatvar_shift_v8i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: splatvar_shift_v8i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v8i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psrlw %xmm1, %xmm0
-; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; X32-SSE-NEXT: psrlw %xmm1, %xmm2
-; X32-SSE-NEXT: psrlw $8, %xmm2
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
- %shift = lshr <8 x i8> %a, %splat
- ret <8 x i8> %shift
-}
-
-define <4 x i8> @splatvar_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm1, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: psrlw %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: splatvar_shift_v4i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splatvar_shift_v4i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOPAVX1-LABEL: splatvar_shift_v4i8:
-; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; XOPAVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: splatvar_shift_v4i8:
-; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
-; XOPAVX2-NEXT: retq
-;
-; AVX512DQ-LABEL: splatvar_shift_v4i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: splatvar_shift_v4i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: splatvar_shift_v4i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: splatvar_shift_v4i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v4i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psrlw %xmm1, %xmm0
-; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; X32-SSE-NEXT: psrlw %xmm1, %xmm2
-; X32-SSE-NEXT: psrlw $8, %xmm2
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
- %shift = lshr <4 x i8> %a, %splat
- ret <4 x i8> %shift
-}
-
-define <2 x i8> @splatvar_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
-; SSE2-LABEL: splatvar_shift_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm1, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_shift_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: psrlw %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: splatvar_shift_v2i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splatvar_shift_v2i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: splatvar_shift_v2i8:
-; XOP: # %bb.0:
-; XOP-NEXT: insertq {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7,u,u,u,u,u,u,u,u]
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: splatvar_shift_v2i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: splatvar_shift_v2i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: splatvar_shift_v2i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: splatvar_shift_v2i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BWVL-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: splatvar_shift_v2i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X32-SSE-NEXT: psrlw %xmm1, %xmm0
-; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; X32-SSE-NEXT: psrlw %xmm1, %xmm2
-; X32-SSE-NEXT: psrlw $8, %xmm2
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %splat = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
- %shift = lshr <2 x i8> %a, %splat
- ret <2 x i8> %shift
-}
-
-;
-; Constant Shifts
-;
-
-define <2 x i32> @constant_shift_v2i32(<2 x i32> %a) nounwind {
-; SSE2-LABEL: constant_shift_v2i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $4, %xmm1
-; SSE2-NEXT: psrld $5, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v2i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $5, %xmm1
-; SSE41-NEXT: psrld $4, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: constant_shift_v2i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsrld $5, %xmm0, %xmm1
-; AVX1-NEXT: vpsrld $4, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v2i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; XOPAVX1-LABEL: constant_shift_v2i32:
-; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: constant_shift_v2i32:
-; XOPAVX2: # %bb.0:
-; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX2-NEXT: retq
-;
-; AVX512-LABEL: constant_shift_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: constant_shift_v2i32:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v2i32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $4, %xmm1
-; X32-SSE-NEXT: psrld $5, %xmm0
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <2 x i32> %a, <i32 4, i32 5>
- ret <2 x i32> %shift
-}
-
-define <4 x i16> @constant_shift_v4i16(<4 x i16> %a) nounwind {
-; SSE2-LABEL: constant_shift_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = <u,32768,16384,8192,u,u,u,u>
-; SSE41-NEXT: pmulhuw %xmm0, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: constant_shift_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; AVX-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v4i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v4i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v4i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = <0,1,2,3,u,u,u,u>
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v4i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v4i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v4i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: pmulhuw {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <4 x i16> %a, <i16 0, i16 1, i16 2, i16 3>
- ret <4 x i16> %shift
-}
-
-define <2 x i16> @constant_shift_v2i16(<2 x i16> %a) nounwind {
-; SSE2-LABEL: constant_shift_v2i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrlw $3, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,65535,65535]
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v2i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrlw $3, %xmm1
-; SSE41-NEXT: psrlw $2, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: constant_shift_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $3, %xmm0, %xmm1
-; AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; AVX-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v2i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v2i16:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpsrlw $3, %xmm0, %xmm1
-; AVX512DQ-NEXT: vpsrlw $2, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v2i16:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = <2,3,u,u,u,u,u,u>
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v2i16:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpsrlw $3, %xmm0, %xmm1
-; AVX512DQVL-NEXT: vpsrlw $2, %xmm0, %xmm0
-; AVX512DQVL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v2i16:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm0
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v2i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrlw $3, %xmm1
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,65535,65535,65535,65535]
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pandn %xmm1, %xmm2
-; X32-SSE-NEXT: por %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <2 x i16> %a, <i16 2, i16 3>
- ret <2 x i16> %shift
-}
-
-define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind {
-; SSE2-LABEL: constant_shift_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: packuswb %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: constant_shift_v8i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v8i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v8i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v8i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v8i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,0,0,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v8i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v8i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v8i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <8 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
- ret <8 x i8> %shift
-}
-
-define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind {
-; SSE2-LABEL: constant_shift_v4i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v4i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: packuswb %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: constant_shift_v4i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v4i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v4i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v4i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v4i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v4i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v4i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v4i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <4 x i8> %a, <i8 0, i8 1, i8 2, i8 3>
- ret <4 x i8> %shift
-}
-
-define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind {
-; SSE2-LABEL: constant_shift_v2i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v2i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: packuswb %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: constant_shift_v2i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v2i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v2i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512DQ-LABEL: constant_shift_v2i8:
-; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQ-NEXT: vzeroupper
-; AVX512DQ-NEXT: retq
-;
-; AVX512BW-LABEL: constant_shift_v2i8:
-; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [2,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512DQVL-LABEL: constant_shift_v2i8:
-; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512DQVL-NEXT: vzeroupper
-; AVX512DQVL-NEXT: retq
-;
-; AVX512BWVL-LABEL: constant_shift_v2i8:
-; AVX512BWVL: # %bb.0:
-; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BWVL-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
-; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v2i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <2 x i8> %a, <i8 2, i8 3>
- ret <2 x i8> %shift
-}
-
-;
-; Uniform Constant Shifts
-;
-
-define <2 x i32> @splatconstant_shift_v2i32(<2 x i32> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v2i32:
-; SSE: # %bb.0:
-; SSE-NEXT: psrld $5, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $5, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v2i32:
-; XOP: # %bb.0:
-; XOP-NEXT: vpsrld $5, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v2i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $5, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v2i32:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrld $5, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v2i32:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrld $5, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <2 x i32> %a, <i32 5, i32 5>
- ret <2 x i32> %shift
-}
-
-define <4 x i16> @splatconstant_shift_v4i16(<4 x i16> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v4i16:
-; SSE: # %bb.0:
-; SSE-NEXT: psrlw $3, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v4i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v4i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpsrlw $3, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v4i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v4i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrlw $3, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
- ret <4 x i16> %shift
-}
-
-define <2 x i16> @splatconstant_shift_v2i16(<2 x i16> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v2i16:
-; SSE: # %bb.0:
-; SSE-NEXT: psrlw $3, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v2i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v2i16:
-; XOP: # %bb.0:
-; XOP-NEXT: vpsrlw $3, %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v2i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v2i16:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v2i16:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrlw $3, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <2 x i16> %a, <i16 3, i16 3>
- ret <2 x i16> %shift
-}
-
-define <8 x i8> @splatconstant_shift_v8i8(<8 x i8> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v8i8:
-; SSE: # %bb.0:
-; SSE-NEXT: psrlw $3, %xmm0
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v8i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v8i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v8i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v8i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrlw $3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
- ret <8 x i8> %shift
-}
-
-define <4 x i8> @splatconstant_shift_v4i8(<4 x i8> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v4i8:
-; SSE: # %bb.0:
-; SSE-NEXT: psrlw $3, %xmm0
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v4i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v4i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v4i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v4i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v4i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrlw $3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <4 x i8> %a, <i8 3, i8 3, i8 3, i8 3>
- ret <4 x i8> %shift
-}
-
-define <2 x i8> @splatconstant_shift_v2i8(<2 x i8> %a) nounwind {
-; SSE-LABEL: splatconstant_shift_v2i8:
-; SSE: # %bb.0:
-; SSE-NEXT: psrlw $3, %xmm0
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: splatconstant_shift_v2i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: splatconstant_shift_v2i8:
-; XOP: # %bb.0:
-; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
-; XOP-NEXT: retq
-;
-; AVX512-LABEL: splatconstant_shift_v2i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: retq
-;
-; AVX512VL-LABEL: splatconstant_shift_v2i8:
-; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512VL-NEXT: retq
-;
-; X32-SSE-LABEL: splatconstant_shift_v2i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: psrlw $3, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: retl
- %shift = lshr <2 x i8> %a, <i8 3, i8 3>
- ret <2 x i8> %shift
-}