[llvm] f19dff1 - [X86] scmp/ucmp - add SSE42/AVX2/AVX512 test coverage to show current state of vector legalization/lowering
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 2 04:17:34 PDT 2024
Author: Simon Pilgrim
Date: 2024-09-02T12:17:21+01:00
New Revision: f19dff1b80172ff5628bb9ecef760c65f78ba0d9
URL: https://github.com/llvm/llvm-project/commit/f19dff1b80172ff5628bb9ecef760c65f78ba0d9
DIFF: https://github.com/llvm/llvm-project/commit/f19dff1b80172ff5628bb9ecef760c65f78ba0d9.diff
LOG: [X86] scmp/ucmp - add SSE42/AVX2/AVX512 test coverage to show current state of vector legalization/lowering
Added: 

Modified: 
    llvm/test/CodeGen/X86/scmp.ll
    llvm/test/CodeGen/X86/ucmp.ll

Removed: 
################################################################################
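For context, the modified tests exercise the llvm.scmp/llvm.ucmp three-way comparison intrinsics: each result element is -1 if the first operand is less than the second, 0 if they are equal, and +1 otherwise, using signed (scmp) or unsigned (ucmp) comparison. A minimal IR sketch of the vector pattern the new RUN lines cover (the function name here is hypothetical, not from the test files):

; Elementwise three-way signed compare; on plain SSE this lowers to
; two pcmpgtd compares and a psubd, as the checks below show.
declare <4 x i32> @llvm.scmp.v4i32.v4i32(<4 x i32>, <4 x i32>)

define <4 x i32> @scmp_example(<4 x i32> %x, <4 x i32> %y) nounwind {
  ; each lane: -1 if x < y, 0 if x == y, +1 if x > y
  %r = call <4 x i32> @llvm.scmp.v4i32.v4i32(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %r
}

The new RUN lines pin -mcpu to the x86-64 micro-architecture levels (v2 adds SSE4.2, v3 AVX2, v4 AVX-512), so the checks record how each feature level currently legalizes and lowers these intrinsics.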
diff --git a/llvm/test/CodeGen/X86/scmp.ll b/llvm/test/CodeGen/X86/scmp.ll
index 62b3d4fd1bd62d..0746a07d2cdf26 100644
--- a/llvm/test/CodeGen/X86/scmp.ll
+++ b/llvm/test/CodeGen/X86/scmp.ll
@@ -1,5 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefixes=X64,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=X64,SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=X64,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=X64,AVX,AVX512
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
define i8 @scmp.8.8(i8 %x, i8 %y) nounwind {
@@ -435,14 +438,30 @@ define i41 @scmp_uncommon_types(i7 %x, i7 %y) nounwind {
}
define <4 x i32> @scmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind {
-; X64-LABEL: scmp_normal_vectors:
-; X64: # %bb.0:
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: pcmpgtd %xmm1, %xmm2
-; X64-NEXT: pcmpgtd %xmm0, %xmm1
-; X64-NEXT: psubd %xmm2, %xmm1
-; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: retq
+; SSE-LABEL: scmp_normal_vectors:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE-NEXT: psubd %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: scmp_normal_vectors:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: scmp_normal_vectors:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
+; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1]
+; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1}
+; AVX512-NEXT: retq
;
; X86-LABEL: scmp_normal_vectors:
; X86: # %bb.0:
@@ -487,50 +506,119 @@ define <4 x i32> @scmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind {
}
define <4 x i8> @scmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind {
-; X64-LABEL: scmp_narrow_vec_result:
-; X64: # %bb.0:
-; X64-NEXT: movd %xmm1, %eax
-; X64-NEXT: movd %xmm0, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; X64-NEXT: movd %xmm2, %ecx
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X64-NEXT: movd %xmm2, %edx
-; X64-NEXT: cmpl %ecx, %edx
-; X64-NEXT: setl %cl
-; X64-NEXT: setg %dl
-; X64-NEXT: subb %cl, %dl
-; X64-NEXT: movzbl %dl, %ecx
-; X64-NEXT: shll $8, %ecx
-; X64-NEXT: orl %eax, %ecx
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; X64-NEXT: movd %xmm2, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: movd %xmm2, %edx
-; X64-NEXT: cmpl %eax, %edx
-; X64-NEXT: setl %al
-; X64-NEXT: setg %dl
-; X64-NEXT: subb %al, %dl
-; X64-NEXT: movzbl %dl, %eax
-; X64-NEXT: shll $16, %eax
-; X64-NEXT: orl %ecx, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; X64-NEXT: movd %xmm1, %ecx
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; X64-NEXT: movd %xmm0, %edx
-; X64-NEXT: cmpl %ecx, %edx
-; X64-NEXT: setl %cl
-; X64-NEXT: setg %dl
-; X64-NEXT: subb %cl, %dl
-; X64-NEXT: movzbl %dl, %ecx
-; X64-NEXT: shll $24, %ecx
-; X64-NEXT: orl %eax, %ecx
-; X64-NEXT: movd %ecx, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: scmp_narrow_vec_result:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: cmpl %eax, %ecx
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: cmpl %ecx, %edx
+; SSE2-NEXT: setl %cl
+; SSE2-NEXT: setg %dl
+; SSE2-NEXT: subb %cl, %dl
+; SSE2-NEXT: movzbl %dl, %ecx
+; SSE2-NEXT: shll $8, %ecx
+; SSE2-NEXT: orl %eax, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: cmpl %eax, %edx
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %dl
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: shll $16, %eax
+; SSE2-NEXT: orl %ecx, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; SSE2-NEXT: movd %xmm0, %edx
+; SSE2-NEXT: cmpl %ecx, %edx
+; SSE2-NEXT: setl %cl
+; SSE2-NEXT: setg %dl
+; SSE2-NEXT: subb %cl, %dl
+; SSE2-NEXT: movzbl %dl, %ecx
+; SSE2-NEXT: shll $24, %ecx
+; SSE2-NEXT: orl %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: scmp_narrow_vec_result:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pextrd $1, %xmm1, %eax
+; SSE4-NEXT: pextrd $1, %xmm0, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: movd %xmm1, %ecx
+; SSE4-NEXT: movd %xmm0, %edx
+; SSE4-NEXT: cmpl %ecx, %edx
+; SSE4-NEXT: setl %cl
+; SSE4-NEXT: setg %dl
+; SSE4-NEXT: subb %cl, %dl
+; SSE4-NEXT: movzbl %dl, %ecx
+; SSE4-NEXT: movd %ecx, %xmm2
+; SSE4-NEXT: pinsrb $1, %eax, %xmm2
+; SSE4-NEXT: pextrd $2, %xmm1, %eax
+; SSE4-NEXT: pextrd $2, %xmm0, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $2, %eax, %xmm2
+; SSE4-NEXT: pextrd $3, %xmm1, %eax
+; SSE4-NEXT: pextrd $3, %xmm0, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $3, %eax, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: scmp_narrow_vec_result:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrd $1, %xmm1, %eax
+; AVX-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX-NEXT: cmpl %eax, %ecx
+; AVX-NEXT: setl %al
+; AVX-NEXT: setg %cl
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: vmovd %xmm1, %eax
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: cmpl %eax, %edx
+; AVX-NEXT: setl %al
+; AVX-NEXT: setg %dl
+; AVX-NEXT: subb %al, %dl
+; AVX-NEXT: vmovd %edx, %xmm2
+; AVX-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
+; AVX-NEXT: vpextrd $2, %xmm1, %eax
+; AVX-NEXT: vpextrd $2, %xmm0, %ecx
+; AVX-NEXT: cmpl %eax, %ecx
+; AVX-NEXT: setl %al
+; AVX-NEXT: setg %cl
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: vpinsrb $2, %ecx, %xmm2, %xmm2
+; AVX-NEXT: vpextrd $3, %xmm1, %eax
+; AVX-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX-NEXT: cmpl %eax, %ecx
+; AVX-NEXT: setl %al
+; AVX-NEXT: setg %cl
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: vpinsrb $3, %ecx, %xmm2, %xmm0
+; AVX-NEXT: retq
;
; X86-LABEL: scmp_narrow_vec_result:
; X86: # %bb.0:
@@ -571,20 +659,51 @@ define <4 x i8> @scmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind {
}
define <4 x i32> @scmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind {
-; X64-LABEL: scmp_narrow_vec_op:
-; X64: # %bb.0:
-; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
-; X64-NEXT: psrad $24, %xmm1
-; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X64-NEXT: psrad $24, %xmm0
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: pcmpgtd %xmm1, %xmm2
-; X64-NEXT: pcmpgtd %xmm0, %xmm1
-; X64-NEXT: psubd %xmm2, %xmm1
-; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: scmp_narrow_vec_op:
+; SSE2: # %bb.0:
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: psrad $24, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: psrad $24, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: scmp_narrow_vec_op:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovsxbd %xmm1, %xmm1
+; SSE4-NEXT: pmovsxbd %xmm0, %xmm0
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE4-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE4-NEXT: psubd %xmm2, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX2-LABEL: scmp_narrow_vec_op:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbd %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxbd %xmm0, %xmm0
+; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: scmp_narrow_vec_op:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsxbd %xmm0, %xmm0
+; AVX512-NEXT: vpmovsxbd %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
+; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1]
+; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1}
+; AVX512-NEXT: retq
;
; X86-LABEL: scmp_narrow_vec_op:
; X86: # %bb.0:
@@ -629,47 +748,109 @@ define <4 x i32> @scmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind {
}
define <16 x i32> @scmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind {
-; X64-LABEL: scmp_wide_vec_result:
-; X64: # %bb.0:
-; X64-NEXT: movdqa %xmm1, %xmm2
-; X64-NEXT: movdqa %xmm0, %xmm3
-; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X64-NEXT: psrad $24, %xmm0
-; X64-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; X64-NEXT: psrad $24, %xmm5
-; X64-NEXT: movdqa %xmm5, %xmm6
-; X64-NEXT: pcmpgtd %xmm0, %xmm6
-; X64-NEXT: pcmpgtd %xmm5, %xmm0
-; X64-NEXT: psubd %xmm6, %xmm0
-; X64-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; X64-NEXT: psrad $24, %xmm1
-; X64-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
-; X64-NEXT: psrad $24, %xmm4
-; X64-NEXT: movdqa %xmm4, %xmm5
-; X64-NEXT: pcmpgtd %xmm1, %xmm5
-; X64-NEXT: pcmpgtd %xmm4, %xmm1
-; X64-NEXT: psubd %xmm5, %xmm1
-; X64-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
-; X64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; X64-NEXT: psrad $24, %xmm2
-; X64-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
-; X64-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; X64-NEXT: psrad $24, %xmm3
-; X64-NEXT: movdqa %xmm3, %xmm6
-; X64-NEXT: pcmpgtd %xmm2, %xmm6
-; X64-NEXT: pcmpgtd %xmm3, %xmm2
-; X64-NEXT: psubd %xmm6, %xmm2
-; X64-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; X64-NEXT: psrad $24, %xmm3
-; X64-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; X64-NEXT: psrad $24, %xmm4
-; X64-NEXT: movdqa %xmm4, %xmm5
-; X64-NEXT: pcmpgtd %xmm3, %xmm5
-; X64-NEXT: pcmpgtd %xmm4, %xmm3
-; X64-NEXT: psubd %xmm5, %xmm3
-; X64-NEXT: retq
+; SSE2-LABEL: scmp_wide_vec_result:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: psrad $24, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: psrad $24, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm0
+; SSE2-NEXT: psubd %xmm6, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $24, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $24, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
+; SSE2-NEXT: psubd %xmm5, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: psrad $24, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; SSE2-NEXT: psrad $24, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
+; SSE2-NEXT: psubd %xmm6, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: psrad $24, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; SSE2-NEXT: psrad $24, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
+; SSE2-NEXT: psubd %xmm5, %xmm3
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: scmp_wide_vec_result:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: pmovsxbd %xmm1, %xmm0
+; SSE4-NEXT: pmovsxbd %xmm4, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE4-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE4-NEXT: psubd %xmm3, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
+; SSE4-NEXT: pmovsxbd %xmm2, %xmm5
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,1,1]
+; SSE4-NEXT: pmovsxbd %xmm2, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtd %xmm5, %xmm3
+; SSE4-NEXT: pcmpgtd %xmm2, %xmm5
+; SSE4-NEXT: psubd %xmm3, %xmm5
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovsxbd %xmm2, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovsxbd %xmm3, %xmm3
+; SSE4-NEXT: movdqa %xmm3, %xmm6
+; SSE4-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE4-NEXT: pcmpgtd %xmm3, %xmm2
+; SSE4-NEXT: psubd %xmm6, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; SSE4-NEXT: pmovsxbd %xmm1, %xmm3
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3]
+; SSE4-NEXT: pmovsxbd %xmm1, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE4-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE4-NEXT: psubd %xmm4, %xmm3
+; SSE4-NEXT: movdqa %xmm5, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX2-LABEL: scmp_wide_vec_result:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbd %xmm1, %ymm2
+; AVX2-NEXT: vpmovsxbd %xmm0, %ymm3
+; AVX2-NEXT: vpcmpgtd %ymm2, %ymm3, %ymm4
+; AVX2-NEXT: vpcmpgtd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm3
+; AVX2-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm1
+; AVX2-NEXT: vmovdqa %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: scmp_wide_vec_result:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpcmpgtb %xmm0, %xmm1, %k1
+; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm0 {%k2} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512-NEXT: retq
;
; X86-LABEL: scmp_wide_vec_result:
; X86: # %bb.0:
@@ -813,145 +994,515 @@ define <16 x i32> @scmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind {
}
define <16 x i8> @scmp_wide_vec_op(<16 x i64> %x, <16 x i64> %y) nounwind {
-; X64-LABEL: scmp_wide_vec_op:
-; X64: # %bb.0:
-; X64-NEXT: movq %xmm7, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm8
-; X64-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; X64-NEXT: movq %xmm7, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm7
-; X64-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; X64-NEXT: movq %xmm6, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm7
-; X64-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
-; X64-NEXT: movq %xmm6, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm6
-; X64-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-NEXT: movq %xmm5, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm6
-; X64-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; X64-NEXT: movq %xmm5, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm5
-; X64-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; X64-NEXT: movq %xmm4, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm5
-; X64-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; X64-NEXT: movq %xmm4, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm4
-; X64-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; X64-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
-; X64-NEXT: movq %xmm3, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm4
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; X64-NEXT: movq %xmm3, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm3
-; X64-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; X64-NEXT: movq %xmm2, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm3
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; X64-NEXT: movq %xmm2, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm2
-; X64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; X64-NEXT: movq %xmm1, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm2
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; X64-NEXT: movq %xmm1, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm1
-; X64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; X64-NEXT: movq %xmm0, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; X64-NEXT: movq %xmm0, %rax
-; X64-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: setl %al
-; X64-NEXT: setg %cl
-; X64-NEXT: subb %al, %cl
-; X64-NEXT: movzbl %cl, %eax
-; X64-NEXT: movd %eax, %xmm0
-; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: scmp_wide_vec_op:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %xmm7, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; SSE2-NEXT: movq %xmm7, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm7
+; SSE2-NEXT: movq %xmm6, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
+; SSE2-NEXT: movq %xmm6, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; SSE2-NEXT: movq %xmm5, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; SSE2-NEXT: movq %xmm5, %rcx
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: movd %eax, %xmm6
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movq %xmm4, %rcx
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: movd %eax, %xmm8
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; SSE2-NEXT: movq %xmm4, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movq %xmm3, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE2-NEXT: movq %xmm3, %rcx
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %dl
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE2-NEXT: movq %xmm1, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %cl
+; SSE2-NEXT: subb %al, %cl
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: scmp_wide_vec_op:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pextrq $1, %xmm0, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: movq %xmm0, %rcx
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT: setl %cl
+; SSE4-NEXT: setg %dl
+; SSE4-NEXT: subb %cl, %dl
+; SSE4-NEXT: movzbl %dl, %ecx
+; SSE4-NEXT: movd %ecx, %xmm0
+; SSE4-NEXT: pinsrb $1, %eax, %xmm0
+; SSE4-NEXT: movq %xmm1, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $2, %eax, %xmm0
+; SSE4-NEXT: pextrq $1, %xmm1, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $3, %eax, %xmm0
+; SSE4-NEXT: movq %xmm2, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $4, %eax, %xmm0
+; SSE4-NEXT: pextrq $1, %xmm2, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $5, %eax, %xmm0
+; SSE4-NEXT: movq %xmm3, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $6, %eax, %xmm0
+; SSE4-NEXT: pextrq $1, %xmm3, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $7, %eax, %xmm0
+; SSE4-NEXT: movq %xmm4, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $8, %eax, %xmm0
+; SSE4-NEXT: pextrq $1, %xmm4, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $9, %eax, %xmm0
+; SSE4-NEXT: movq %xmm5, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $10, %eax, %xmm0
+; SSE4-NEXT: pextrq $1, %xmm5, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $11, %eax, %xmm0
+; SSE4-NEXT: movq %xmm6, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $12, %eax, %xmm0
+; SSE4-NEXT: pextrq $1, %xmm6, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $13, %eax, %xmm0
+; SSE4-NEXT: movq %xmm7, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $14, %eax, %xmm0
+; SSE4-NEXT: pextrq $1, %xmm7, %rax
+; SSE4-NEXT: cmpq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: pinsrb $15, %eax, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX2-LABEL: scmp_wide_vec_op:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax
+; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vmovq %xmm4, %rax
+; AVX2-NEXT: vmovq %xmm0, %rdx
+; AVX2-NEXT: cmpq %rax, %rdx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %dl
+; AVX2-NEXT: subb %al, %dl
+; AVX2-NEXT: vmovd %edx, %xmm8
+; AVX2-NEXT: vpinsrb $1, %ecx, %xmm8, %xmm8
+; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm4
+; AVX2-NEXT: vmovq %xmm4, %rax
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $2, %ecx, %xmm8, %xmm8
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax
+; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $3, %ecx, %xmm8, %xmm0
+; AVX2-NEXT: vmovq %xmm5, %rax
+; AVX2-NEXT: vmovq %xmm1, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm5, %rax
+; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm4
+; AVX2-NEXT: vmovq %xmm4, %rax
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax
+; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm6, %rax
+; AVX2-NEXT: vmovq %xmm2, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm6, %rax
+; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vmovq %xmm2, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm7, %rax
+; AVX2-NEXT: vmovq %xmm3, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm7, %rax
+; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vmovq %xmm2, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vpinsrb $15, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: scmp_wide_vec_op:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: vmovq %xmm0, %rdx
+; AVX512-NEXT: cmpq %rax, %rdx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %dl
+; AVX512-NEXT: subb %al, %dl
+; AVX512-NEXT: vmovd %edx, %xmm4
+; AVX512-NEXT: vpinsrb $1, %ecx, %xmm4, %xmm4
+; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm5
+; AVX512-NEXT: vmovq %xmm5, %rax
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm6
+; AVX512-NEXT: vmovq %xmm6, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $2, %ecx, %xmm4, %xmm4
+; AVX512-NEXT: vpextrq $1, %xmm5, %rax
+; AVX512-NEXT: vpextrq $1, %xmm6, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $3, %ecx, %xmm4, %xmm4
+; AVX512-NEXT: vextracti32x4 $2, %zmm2, %xmm5
+; AVX512-NEXT: vmovq %xmm5, %rax
+; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm6
+; AVX512-NEXT: vmovq %xmm6, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $4, %ecx, %xmm4, %xmm4
+; AVX512-NEXT: vpextrq $1, %xmm5, %rax
+; AVX512-NEXT: vpextrq $1, %xmm6, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $5, %ecx, %xmm4, %xmm4
+; AVX512-NEXT: vextracti32x4 $3, %zmm2, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512-NEXT: vmovq %xmm0, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $6, %ecx, %xmm4, %xmm4
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $7, %ecx, %xmm4, %xmm0
+; AVX512-NEXT: vmovq %xmm3, %rax
+; AVX512-NEXT: vmovq %xmm1, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm3, %rax
+; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX512-NEXT: vmovq %xmm4, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: vpextrq $1, %xmm4, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vextracti32x4 $2, %zmm3, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm4
+; AVX512-NEXT: vmovq %xmm4, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: vpextrq $1, %xmm4, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vextracti32x4 $3, %zmm3, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1
+; AVX512-NEXT: vmovq %xmm1, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: vpinsrb $15, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
;
; X86-LABEL: scmp_wide_vec_op:
; X86: # %bb.0:
@@ -1204,167 +1755,489 @@ define <16 x i8> @scmp_wide_vec_op(<16 x i64> %x, <16 x i64> %y) nounwind {
}
define <7 x i117> @scmp_uncommon_vectors(<7 x i7> %x, <7 x i7> %y) nounwind {
-; X64-LABEL: scmp_uncommon_vectors:
-; X64: # %bb.0:
-; X64-NEXT: pushq %rbp
-; X64-NEXT: pushq %r15
-; X64-NEXT: pushq %r14
-; X64-NEXT: pushq %r13
-; X64-NEXT: pushq %r12
-; X64-NEXT: pushq %rbx
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %ebx
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %ebp
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %r14d
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
-; X64-NEXT: addb %r15b, %r15b
-; X64-NEXT: sarb %r15b
-; X64-NEXT: addb %sil, %sil
-; X64-NEXT: sarb %sil
-; X64-NEXT: cmpb %r15b, %sil
-; X64-NEXT: setl %sil
-; X64-NEXT: setg %r15b
-; X64-NEXT: subb %sil, %r15b
-; X64-NEXT: movsbq %r15b, %rsi
-; X64-NEXT: movq %rsi, (%rax)
-; X64-NEXT: movq %rsi, %xmm0
-; X64-NEXT: sarq $63, %rsi
-; X64-NEXT: addb %r14b, %r14b
-; X64-NEXT: sarb %r14b
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
-; X64-NEXT: addb %r15b, %r15b
-; X64-NEXT: sarb %r15b
-; X64-NEXT: cmpb %r14b, %r15b
-; X64-NEXT: setl %r14b
-; X64-NEXT: setg %r15b
-; X64-NEXT: subb %r14b, %r15b
-; X64-NEXT: movsbq %r15b, %r14
-; X64-NEXT: movq %r14, %r15
-; X64-NEXT: sarq $63, %r15
-; X64-NEXT: addb %bpl, %bpl
-; X64-NEXT: sarb %bpl
-; X64-NEXT: addb %dl, %dl
-; X64-NEXT: sarb %dl
-; X64-NEXT: cmpb %bpl, %dl
-; X64-NEXT: setl %dl
-; X64-NEXT: setg %bpl
-; X64-NEXT: subb %dl, %bpl
-; X64-NEXT: movsbq %bpl, %rdx
-; X64-NEXT: movq %rdx, %r12
-; X64-NEXT: sarq $63, %r12
-; X64-NEXT: addb %bl, %bl
-; X64-NEXT: sarb %bl
-; X64-NEXT: addb %cl, %cl
-; X64-NEXT: sarb %cl
-; X64-NEXT: cmpb %bl, %cl
-; X64-NEXT: setl %cl
-; X64-NEXT: setg %bl
-; X64-NEXT: subb %cl, %bl
-; X64-NEXT: movsbq %bl, %rbx
-; X64-NEXT: movq %rbx, %rcx
-; X64-NEXT: sarq $63, %rcx
-; X64-NEXT: addb %r11b, %r11b
-; X64-NEXT: sarb %r11b
-; X64-NEXT: addb %r8b, %r8b
-; X64-NEXT: sarb %r8b
-; X64-NEXT: cmpb %r11b, %r8b
-; X64-NEXT: setl %r8b
-; X64-NEXT: setg %r11b
-; X64-NEXT: subb %r8b, %r11b
-; X64-NEXT: movsbq %r11b, %r8
-; X64-NEXT: movq %r8, %r11
-; X64-NEXT: sarq $63, %r11
-; X64-NEXT: addb %r10b, %r10b
-; X64-NEXT: sarb %r10b
-; X64-NEXT: addb %r9b, %r9b
-; X64-NEXT: sarb %r9b
-; X64-NEXT: cmpb %r10b, %r9b
-; X64-NEXT: setl %r9b
-; X64-NEXT: setg %r10b
-; X64-NEXT: subb %r9b, %r10b
-; X64-NEXT: movsbq %r10b, %r9
-; X64-NEXT: movq %r9, %r10
-; X64-NEXT: sarq $63, %r10
-; X64-NEXT: addb %dil, %dil
-; X64-NEXT: sarb %dil
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %ebp
-; X64-NEXT: addb %bpl, %bpl
-; X64-NEXT: sarb %bpl
-; X64-NEXT: cmpb %dil, %bpl
-; X64-NEXT: setl %dil
-; X64-NEXT: setg %bpl
-; X64-NEXT: subb %dil, %bpl
-; X64-NEXT: movsbq %bpl, %r13
-; X64-NEXT: movq %r13, %rbp
-; X64-NEXT: sarq $63, %rbp
-; X64-NEXT: movq %rbp, %rdi
-; X64-NEXT: shldq $62, %r13, %rdi
-; X64-NEXT: movq %rdi, 88(%rax)
-; X64-NEXT: shrq $2, %rbp
-; X64-NEXT: movl %ebp, 96(%rax)
-; X64-NEXT: movq %r10, %rdi
-; X64-NEXT: shldq $20, %r9, %rdi
-; X64-NEXT: movq %rdi, 64(%rax)
-; X64-NEXT: movq %r11, %rdi
-; X64-NEXT: shldq $31, %r8, %rdi
-; X64-NEXT: movq %rdi, 48(%rax)
-; X64-NEXT: movq %rcx, %rdi
-; X64-NEXT: shldq $42, %rbx, %rdi
-; X64-NEXT: movq %rdi, 32(%rax)
-; X64-NEXT: movabsq $9007199254738944, %rdi # imm = 0x1FFFFFFFFFF800
-; X64-NEXT: andq %r12, %rdi
-; X64-NEXT: shldq $53, %rdx, %r12
-; X64-NEXT: movq %r12, 16(%rax)
-; X64-NEXT: movabsq $9007199254740991, %r12 # imm = 0x1FFFFFFFFFFFFF
-; X64-NEXT: andq %r12, %r15
-; X64-NEXT: shldq $9, %r14, %r15
-; X64-NEXT: shlq $62, %r13
-; X64-NEXT: orq %r15, %r13
-; X64-NEXT: movq %r13, 80(%rax)
-; X64-NEXT: movabsq $2251799813685247, %r15 # imm = 0x7FFFFFFFFFFFF
-; X64-NEXT: andq %rbp, %r15
-; X64-NEXT: movq %r15, %r13
-; X64-NEXT: shrq $48, %r13
-; X64-NEXT: movb %r13b, 102(%rax)
-; X64-NEXT: shrq $32, %r15
-; X64-NEXT: movw %r15w, 100(%rax)
-; X64-NEXT: shlq $42, %rbx
-; X64-NEXT: shrq $11, %rdi
-; X64-NEXT: orq %rbx, %rdi
-; X64-NEXT: movq %rdi, 24(%rax)
-; X64-NEXT: shlq $9, %r14
-; X64-NEXT: shrq $44, %r10
-; X64-NEXT: andl $511, %r10d # imm = 0x1FF
-; X64-NEXT: orq %r14, %r10
-; X64-NEXT: movq %r10, 72(%rax)
-; X64-NEXT: shlq $20, %r9
-; X64-NEXT: shrq $33, %r11
-; X64-NEXT: andl $1048575, %r11d # imm = 0xFFFFF
-; X64-NEXT: orq %r9, %r11
-; X64-NEXT: movq %r11, 56(%rax)
-; X64-NEXT: shlq $31, %r8
-; X64-NEXT: shrq $22, %rcx
-; X64-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
-; X64-NEXT: orq %r8, %rcx
-; X64-NEXT: movq %rcx, 40(%rax)
-; X64-NEXT: movq %rsi, %xmm1
-; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; X64-NEXT: movq %xmm0, %rcx
-; X64-NEXT: andq %r12, %rcx
-; X64-NEXT: shlq $53, %rdx
-; X64-NEXT: orq %rcx, %rdx
-; X64-NEXT: movq %rdx, 8(%rax)
-; X64-NEXT: popq %rbx
-; X64-NEXT: popq %r12
-; X64-NEXT: popq %r13
-; X64-NEXT: popq %r14
-; X64-NEXT: popq %r15
-; X64-NEXT: popq %rbp
-; X64-NEXT: retq
+; SSE2-LABEL: scmp_uncommon_vectors:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movq %rdi, %r14
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %ebx
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %ebp
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r12d
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
+; SSE2-NEXT: addb %dil, %dil
+; SSE2-NEXT: sarb %dil
+; SSE2-NEXT: addb %sil, %sil
+; SSE2-NEXT: sarb %sil
+; SSE2-NEXT: cmpb %dil, %sil
+; SSE2-NEXT: setl %sil
+; SSE2-NEXT: setg %dil
+; SSE2-NEXT: subb %sil, %dil
+; SSE2-NEXT: movsbq %dil, %rax
+; SSE2-NEXT: movq %rax, (%r14)
+; SSE2-NEXT: movq %rax, %rsi
+; SSE2-NEXT: sarq $63, %rsi
+; SSE2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: addb %r11b, %r11b
+; SSE2-NEXT: sarb %r11b
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: addb %sil, %sil
+; SSE2-NEXT: sarb %sil
+; SSE2-NEXT: cmpb %r11b, %sil
+; SSE2-NEXT: setl %sil
+; SSE2-NEXT: setg %r11b
+; SSE2-NEXT: subb %sil, %r11b
+; SSE2-NEXT: movsbq %r11b, %rdi
+; SSE2-NEXT: movq %rdi, %r11
+; SSE2-NEXT: sarq $63, %r11
+; SSE2-NEXT: addb %r12b, %r12b
+; SSE2-NEXT: sarb %r12b
+; SSE2-NEXT: addb %dl, %dl
+; SSE2-NEXT: sarb %dl
+; SSE2-NEXT: cmpb %r12b, %dl
+; SSE2-NEXT: setl %dl
+; SSE2-NEXT: setg %sil
+; SSE2-NEXT: subb %dl, %sil
+; SSE2-NEXT: movsbq %sil, %rdx
+; SSE2-NEXT: movq %rdx, %r13
+; SSE2-NEXT: sarq $63, %r13
+; SSE2-NEXT: addb %r15b, %r15b
+; SSE2-NEXT: sarb %r15b
+; SSE2-NEXT: addb %cl, %cl
+; SSE2-NEXT: sarb %cl
+; SSE2-NEXT: cmpb %r15b, %cl
+; SSE2-NEXT: setl %cl
+; SSE2-NEXT: setg %sil
+; SSE2-NEXT: subb %cl, %sil
+; SSE2-NEXT: movsbq %sil, %r15
+; SSE2-NEXT: movq %r15, %rcx
+; SSE2-NEXT: sarq $63, %rcx
+; SSE2-NEXT: addb %bpl, %bpl
+; SSE2-NEXT: sarb %bpl
+; SSE2-NEXT: addb %r8b, %r8b
+; SSE2-NEXT: sarb %r8b
+; SSE2-NEXT: cmpb %bpl, %r8b
+; SSE2-NEXT: setl %sil
+; SSE2-NEXT: setg %r8b
+; SSE2-NEXT: subb %sil, %r8b
+; SSE2-NEXT: movsbq %r8b, %r8
+; SSE2-NEXT: movq %r8, %r12
+; SSE2-NEXT: sarq $63, %r12
+; SSE2-NEXT: addb %bl, %bl
+; SSE2-NEXT: sarb %bl
+; SSE2-NEXT: addb %r9b, %r9b
+; SSE2-NEXT: sarb %r9b
+; SSE2-NEXT: cmpb %bl, %r9b
+; SSE2-NEXT: setl %sil
+; SSE2-NEXT: setg %r9b
+; SSE2-NEXT: subb %sil, %r9b
+; SSE2-NEXT: movsbq %r9b, %r9
+; SSE2-NEXT: movq %r9, %rbx
+; SSE2-NEXT: sarq $63, %rbx
+; SSE2-NEXT: addb %r10b, %r10b
+; SSE2-NEXT: sarb %r10b
+; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: addb %sil, %sil
+; SSE2-NEXT: sarb %sil
+; SSE2-NEXT: cmpb %r10b, %sil
+; SSE2-NEXT: setl %sil
+; SSE2-NEXT: setg %r10b
+; SSE2-NEXT: subb %sil, %r10b
+; SSE2-NEXT: movsbq %r10b, %rbp
+; SSE2-NEXT: movq %rbp, %r10
+; SSE2-NEXT: sarq $63, %r10
+; SSE2-NEXT: movq %r10, %rsi
+; SSE2-NEXT: shldq $62, %rbp, %rsi
+; SSE2-NEXT: movq %rax, %xmm0
+; SSE2-NEXT: movq %rsi, 88(%r14)
+; SSE2-NEXT: shrq $2, %r10
+; SSE2-NEXT: movl %r10d, 96(%r14)
+; SSE2-NEXT: movq %rbx, %rsi
+; SSE2-NEXT: shldq $20, %r9, %rsi
+; SSE2-NEXT: movq %rsi, 64(%r14)
+; SSE2-NEXT: movq %r12, %rsi
+; SSE2-NEXT: shldq $31, %r8, %rsi
+; SSE2-NEXT: movq %rsi, 48(%r14)
+; SSE2-NEXT: movq %rcx, %rsi
+; SSE2-NEXT: shldq $42, %r15, %rsi
+; SSE2-NEXT: movabsq $9007199254738944, %rax # imm = 0x1FFFFFFFFFF800
+; SSE2-NEXT: andq %r13, %rax
+; SSE2-NEXT: shldq $53, %rdx, %r13
+; SSE2-NEXT: movq %rsi, 32(%r14)
+; SSE2-NEXT: movq %r13, 16(%r14)
+; SSE2-NEXT: movabsq $9007199254740991, %rsi # imm = 0x1FFFFFFFFFFFFF
+; SSE2-NEXT: andq %rsi, %r11
+; SSE2-NEXT: shldq $9, %rdi, %r11
+; SSE2-NEXT: shlq $62, %rbp
+; SSE2-NEXT: orq %r11, %rbp
+; SSE2-NEXT: movq %rbp, 80(%r14)
+; SSE2-NEXT: movabsq $2251799813685247, %r11 # imm = 0x7FFFFFFFFFFFF
+; SSE2-NEXT: andq %r10, %r11
+; SSE2-NEXT: movq %r11, %r10
+; SSE2-NEXT: shrq $48, %r10
+; SSE2-NEXT: movb %r10b, 102(%r14)
+; SSE2-NEXT: shrq $32, %r11
+; SSE2-NEXT: movw %r11w, 100(%r14)
+; SSE2-NEXT: shlq $42, %r15
+; SSE2-NEXT: shrq $11, %rax
+; SSE2-NEXT: orq %r15, %rax
+; SSE2-NEXT: movq %rax, 24(%r14)
+; SSE2-NEXT: shlq $9, %rdi
+; SSE2-NEXT: shrq $44, %rbx
+; SSE2-NEXT: andl $511, %ebx # imm = 0x1FF
+; SSE2-NEXT: orq %rdi, %rbx
+; SSE2-NEXT: movq %rbx, 72(%r14)
+; SSE2-NEXT: shlq $20, %r9
+; SSE2-NEXT: shrq $33, %r12
+; SSE2-NEXT: andl $1048575, %r12d # imm = 0xFFFFF
+; SSE2-NEXT: orq %r9, %r12
+; SSE2-NEXT: movq %r12, 56(%r14)
+; SSE2-NEXT: shlq $31, %r8
+; SSE2-NEXT: shrq $22, %rcx
+; SSE2-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
+; SSE2-NEXT: orq %r8, %rcx
+; SSE2-NEXT: movq %rcx, 40(%r14)
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Folded Reload
+; SSE2-NEXT: # xmm1 = mem[0],zero
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: andq %rsi, %rax
+; SSE2-NEXT: shlq $53, %rdx
+; SSE2-NEXT: orq %rax, %rdx
+; SSE2-NEXT: movq %rdx, 8(%r14)
+; SSE2-NEXT: movq %r14, %rax
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: scmp_uncommon_vectors:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: movq %rdi, %r14
+; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %ebp
+; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
+; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r13d
+; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %ebx
+; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
+; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
+; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
+; SSE4-NEXT: addb %dil, %dil
+; SSE4-NEXT: sarb %dil
+; SSE4-NEXT: addb %sil, %sil
+; SSE4-NEXT: sarb %sil
+; SSE4-NEXT: cmpb %dil, %sil
+; SSE4-NEXT: setl %sil
+; SSE4-NEXT: setg %dil
+; SSE4-NEXT: subb %sil, %dil
+; SSE4-NEXT: movsbq %dil, %r12
+; SSE4-NEXT: movq %r12, %rdi
+; SSE4-NEXT: sarq $63, %rdi
+; SSE4-NEXT: addb %r10b, %r10b
+; SSE4-NEXT: sarb %r10b
+; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
+; SSE4-NEXT: addb %sil, %sil
+; SSE4-NEXT: sarb %sil
+; SSE4-NEXT: cmpb %r10b, %sil
+; SSE4-NEXT: setl %sil
+; SSE4-NEXT: setg %r10b
+; SSE4-NEXT: subb %sil, %r10b
+; SSE4-NEXT: movsbq %r10b, %r10
+; SSE4-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: sarq $63, %r10
+; SSE4-NEXT: addb %r11b, %r11b
+; SSE4-NEXT: sarb %r11b
+; SSE4-NEXT: addb %dl, %dl
+; SSE4-NEXT: sarb %dl
+; SSE4-NEXT: cmpb %r11b, %dl
+; SSE4-NEXT: setl %dl
+; SSE4-NEXT: setg %r11b
+; SSE4-NEXT: subb %dl, %r11b
+; SSE4-NEXT: movsbq %r11b, %r11
+; SSE4-NEXT: movq %r11, %rsi
+; SSE4-NEXT: sarq $63, %rsi
+; SSE4-NEXT: addb %bl, %bl
+; SSE4-NEXT: sarb %bl
+; SSE4-NEXT: addb %cl, %cl
+; SSE4-NEXT: sarb %cl
+; SSE4-NEXT: cmpb %bl, %cl
+; SSE4-NEXT: setl %cl
+; SSE4-NEXT: setg %dl
+; SSE4-NEXT: subb %cl, %dl
+; SSE4-NEXT: movsbq %dl, %rbx
+; SSE4-NEXT: movq %rbx, %rcx
+; SSE4-NEXT: sarq $63, %rcx
+; SSE4-NEXT: addb %r13b, %r13b
+; SSE4-NEXT: sarb %r13b
+; SSE4-NEXT: addb %r8b, %r8b
+; SSE4-NEXT: sarb %r8b
+; SSE4-NEXT: cmpb %r13b, %r8b
+; SSE4-NEXT: setl %dl
+; SSE4-NEXT: setg %r8b
+; SSE4-NEXT: subb %dl, %r8b
+; SSE4-NEXT: movsbq %r8b, %rdx
+; SSE4-NEXT: movq %rdx, %r8
+; SSE4-NEXT: sarq $63, %r8
+; SSE4-NEXT: addb %r15b, %r15b
+; SSE4-NEXT: sarb %r15b
+; SSE4-NEXT: addb %r9b, %r9b
+; SSE4-NEXT: sarb %r9b
+; SSE4-NEXT: cmpb %r15b, %r9b
+; SSE4-NEXT: setl %r9b
+; SSE4-NEXT: setg %r15b
+; SSE4-NEXT: subb %r9b, %r15b
+; SSE4-NEXT: movsbq %r15b, %r9
+; SSE4-NEXT: movq %r9, %r15
+; SSE4-NEXT: sarq $63, %r15
+; SSE4-NEXT: addb %bpl, %bpl
+; SSE4-NEXT: sarb %bpl
+; SSE4-NEXT: movzbl {{[0-9]+}}(%rsp), %r13d
+; SSE4-NEXT: addb %r13b, %r13b
+; SSE4-NEXT: sarb %r13b
+; SSE4-NEXT: cmpb %bpl, %r13b
+; SSE4-NEXT: setl %bpl
+; SSE4-NEXT: setg %r13b
+; SSE4-NEXT: subb %bpl, %r13b
+; SSE4-NEXT: movsbq %r13b, %rbp
+; SSE4-NEXT: movq %rbp, %rax
+; SSE4-NEXT: sarq $63, %rax
+; SSE4-NEXT: movq %rax, %r13
+; SSE4-NEXT: shldq $62, %rbp, %r13
+; SSE4-NEXT: movq %r12, (%r14)
+; SSE4-NEXT: movq %r13, 88(%r14)
+; SSE4-NEXT: shrq $2, %rax
+; SSE4-NEXT: movl %eax, 96(%r14)
+; SSE4-NEXT: movq %r15, %r12
+; SSE4-NEXT: shldq $20, %r9, %r12
+; SSE4-NEXT: movq %r12, 64(%r14)
+; SSE4-NEXT: movq %r8, %r12
+; SSE4-NEXT: shldq $31, %rdx, %r12
+; SSE4-NEXT: movq %r12, 48(%r14)
+; SSE4-NEXT: movq %rcx, %r12
+; SSE4-NEXT: shldq $42, %rbx, %r12
+; SSE4-NEXT: movabsq $9007199254738944, %r13 # imm = 0x1FFFFFFFFFF800
+; SSE4-NEXT: andq %rsi, %r13
+; SSE4-NEXT: shldq $53, %r11, %rsi
+; SSE4-NEXT: movq %r12, 32(%r14)
+; SSE4-NEXT: movq %rsi, 16(%r14)
+; SSE4-NEXT: movabsq $9007199254740991, %rsi # imm = 0x1FFFFFFFFFFFFF
+; SSE4-NEXT: andq %rsi, %r10
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; SSE4-NEXT: shldq $9, %r12, %r10
+; SSE4-NEXT: shlq $62, %rbp
+; SSE4-NEXT: orq %r10, %rbp
+; SSE4-NEXT: movq %rbp, 80(%r14)
+; SSE4-NEXT: andq %rsi, %rdi
+; SSE4-NEXT: shlq $53, %r11
+; SSE4-NEXT: orq %rdi, %r11
+; SSE4-NEXT: movq %r11, 8(%r14)
+; SSE4-NEXT: movabsq $2251799813685247, %rsi # imm = 0x7FFFFFFFFFFFF
+; SSE4-NEXT: andq %rax, %rsi
+; SSE4-NEXT: movq %rsi, %rax
+; SSE4-NEXT: shrq $48, %rax
+; SSE4-NEXT: movb %al, 102(%r14)
+; SSE4-NEXT: shrq $32, %rsi
+; SSE4-NEXT: movw %si, 100(%r14)
+; SSE4-NEXT: shlq $42, %rbx
+; SSE4-NEXT: shrq $11, %r13
+; SSE4-NEXT: orq %rbx, %r13
+; SSE4-NEXT: movq %r13, 24(%r14)
+; SSE4-NEXT: movq %r12, %rax
+; SSE4-NEXT: shlq $9, %rax
+; SSE4-NEXT: shrq $44, %r15
+; SSE4-NEXT: andl $511, %r15d # imm = 0x1FF
+; SSE4-NEXT: orq %rax, %r15
+; SSE4-NEXT: movq %r15, 72(%r14)
+; SSE4-NEXT: shlq $20, %r9
+; SSE4-NEXT: shrq $33, %r8
+; SSE4-NEXT: andl $1048575, %r8d # imm = 0xFFFFF
+; SSE4-NEXT: orq %r9, %r8
+; SSE4-NEXT: movq %r8, 56(%r14)
+; SSE4-NEXT: shlq $31, %rdx
+; SSE4-NEXT: shrq $22, %rcx
+; SSE4-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
+; SSE4-NEXT: orq %rdx, %rcx
+; SSE4-NEXT: movq %rcx, 40(%r14)
+; SSE4-NEXT: movq %r14, %rax
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: scmp_uncommon_vectors:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: pushq %r15
+; AVX-NEXT: pushq %r14
+; AVX-NEXT: pushq %r13
+; AVX-NEXT: pushq %r12
+; AVX-NEXT: pushq %rbx
+; AVX-NEXT: movq %rdi, %rax
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %edi
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r10d
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ebx
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %ebp
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r15d
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r14d
+; AVX-NEXT: addb %r14b, %r14b
+; AVX-NEXT: sarb %r14b
+; AVX-NEXT: addb %sil, %sil
+; AVX-NEXT: sarb %sil
+; AVX-NEXT: cmpb %r14b, %sil
+; AVX-NEXT: setl %sil
+; AVX-NEXT: setg %r14b
+; AVX-NEXT: subb %sil, %r14b
+; AVX-NEXT: movsbq %r14b, %r14
+; AVX-NEXT: movq %r14, (%rax)
+; AVX-NEXT: sarq $63, %r14
+; AVX-NEXT: addb %r15b, %r15b
+; AVX-NEXT: sarb %r15b
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %esi
+; AVX-NEXT: addb %sil, %sil
+; AVX-NEXT: sarb %sil
+; AVX-NEXT: cmpb %r15b, %sil
+; AVX-NEXT: setl %sil
+; AVX-NEXT: setg %r15b
+; AVX-NEXT: subb %sil, %r15b
+; AVX-NEXT: movsbq %r15b, %rsi
+; AVX-NEXT: movq %rsi, %r15
+; AVX-NEXT: sarq $63, %r15
+; AVX-NEXT: addb %bpl, %bpl
+; AVX-NEXT: sarb %bpl
+; AVX-NEXT: addb %dl, %dl
+; AVX-NEXT: sarb %dl
+; AVX-NEXT: cmpb %bpl, %dl
+; AVX-NEXT: setl %dl
+; AVX-NEXT: setg %bpl
+; AVX-NEXT: subb %dl, %bpl
+; AVX-NEXT: movsbq %bpl, %r12
+; AVX-NEXT: movq %r12, %r13
+; AVX-NEXT: sarq $63, %r13
+; AVX-NEXT: addb %bl, %bl
+; AVX-NEXT: sarb %bl
+; AVX-NEXT: addb %cl, %cl
+; AVX-NEXT: sarb %cl
+; AVX-NEXT: cmpb %bl, %cl
+; AVX-NEXT: setl %cl
+; AVX-NEXT: setg %dl
+; AVX-NEXT: subb %cl, %dl
+; AVX-NEXT: movsbq %dl, %rbx
+; AVX-NEXT: movq %rbx, %rcx
+; AVX-NEXT: sarq $63, %rcx
+; AVX-NEXT: addb %r11b, %r11b
+; AVX-NEXT: sarb %r11b
+; AVX-NEXT: addb %r8b, %r8b
+; AVX-NEXT: sarb %r8b
+; AVX-NEXT: cmpb %r11b, %r8b
+; AVX-NEXT: setl %dl
+; AVX-NEXT: setg %r8b
+; AVX-NEXT: subb %dl, %r8b
+; AVX-NEXT: movsbq %r8b, %rdx
+; AVX-NEXT: movq %rdx, %r8
+; AVX-NEXT: sarq $63, %r8
+; AVX-NEXT: addb %r10b, %r10b
+; AVX-NEXT: sarb %r10b
+; AVX-NEXT: addb %r9b, %r9b
+; AVX-NEXT: sarb %r9b
+; AVX-NEXT: cmpb %r10b, %r9b
+; AVX-NEXT: setl %r9b
+; AVX-NEXT: setg %r10b
+; AVX-NEXT: subb %r9b, %r10b
+; AVX-NEXT: movsbq %r10b, %r9
+; AVX-NEXT: movq %r9, %r10
+; AVX-NEXT: sarq $63, %r10
+; AVX-NEXT: addb %dil, %dil
+; AVX-NEXT: sarb %dil
+; AVX-NEXT: movzbl {{[0-9]+}}(%rsp), %r11d
+; AVX-NEXT: addb %r11b, %r11b
+; AVX-NEXT: sarb %r11b
+; AVX-NEXT: cmpb %dil, %r11b
+; AVX-NEXT: setl %dil
+; AVX-NEXT: setg %r11b
+; AVX-NEXT: subb %dil, %r11b
+; AVX-NEXT: movsbq %r11b, %r11
+; AVX-NEXT: movq %r11, %rdi
+; AVX-NEXT: sarq $63, %rdi
+; AVX-NEXT: movq %rdi, %rbp
+; AVX-NEXT: shldq $62, %r11, %rbp
+; AVX-NEXT: movq %rbp, 88(%rax)
+; AVX-NEXT: shrq $2, %rdi
+; AVX-NEXT: movl %edi, 96(%rax)
+; AVX-NEXT: movq %r10, %rbp
+; AVX-NEXT: shldq $20, %r9, %rbp
+; AVX-NEXT: movq %rbp, 64(%rax)
+; AVX-NEXT: movq %r8, %rbp
+; AVX-NEXT: shldq $31, %rdx, %rbp
+; AVX-NEXT: movq %rbp, 48(%rax)
+; AVX-NEXT: movq %rcx, %rbp
+; AVX-NEXT: shldq $42, %rbx, %rbp
+; AVX-NEXT: movq %rbp, 32(%rax)
+; AVX-NEXT: movl $53, %ebp
+; AVX-NEXT: bzhiq %rbp, %r13, %rbp
+; AVX-NEXT: shldq $53, %r12, %r13
+; AVX-NEXT: movq %r13, 16(%rax)
+; AVX-NEXT: movb $53, %r13b
+; AVX-NEXT: bzhiq %r13, %r15, %r15
+; AVX-NEXT: shldq $9, %rsi, %r15
+; AVX-NEXT: shlq $62, %r11
+; AVX-NEXT: orq %r15, %r11
+; AVX-NEXT: movq %r11, 80(%rax)
+; AVX-NEXT: bzhiq %r13, %r14, %r11
+; AVX-NEXT: shlq $53, %r12
+; AVX-NEXT: orq %r11, %r12
+; AVX-NEXT: movq %r12, 8(%rax)
+; AVX-NEXT: movb $51, %r11b
+; AVX-NEXT: bzhiq %r11, %rdi, %rdi
+; AVX-NEXT: movq %rdi, %r11
+; AVX-NEXT: shrq $48, %r11
+; AVX-NEXT: movb %r11b, 102(%rax)
+; AVX-NEXT: shrq $32, %rdi
+; AVX-NEXT: movw %di, 100(%rax)
+; AVX-NEXT: shlq $42, %rbx
+; AVX-NEXT: shrq $11, %rbp
+; AVX-NEXT: orq %rbx, %rbp
+; AVX-NEXT: movq %rbp, 24(%rax)
+; AVX-NEXT: shlq $9, %rsi
+; AVX-NEXT: shrq $44, %r10
+; AVX-NEXT: andl $511, %r10d # imm = 0x1FF
+; AVX-NEXT: orq %rsi, %r10
+; AVX-NEXT: movq %r10, 72(%rax)
+; AVX-NEXT: shlq $20, %r9
+; AVX-NEXT: shrq $33, %r8
+; AVX-NEXT: andl $1048575, %r8d # imm = 0xFFFFF
+; AVX-NEXT: orq %r9, %r8
+; AVX-NEXT: movq %r8, 56(%rax)
+; AVX-NEXT: shlq $31, %rdx
+; AVX-NEXT: shrq $22, %rcx
+; AVX-NEXT: andl $2147483647, %ecx # imm = 0x7FFFFFFF
+; AVX-NEXT: orq %rdx, %rcx
+; AVX-NEXT: movq %rcx, 40(%rax)
+; AVX-NEXT: popq %rbx
+; AVX-NEXT: popq %r12
+; AVX-NEXT: popq %r13
+; AVX-NEXT: popq %r14
+; AVX-NEXT: popq %r15
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
;
; X86-LABEL: scmp_uncommon_vectors:
; X86: # %bb.0:
@@ -1618,36 +2491,122 @@ define <1 x i3> @scmp_scalarize(<1 x i33> %x, <1 x i33> %y) nounwind {
}
define <2 x i8> @scmp_bool_operands(<2 x i1> %x, <2 x i1> %y) nounwind {
-; X64-LABEL: scmp_bool_operands:
-; X64: # %bb.0:
-; X64-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; X64-NEXT: andb $1, %al
-; X64-NEXT: negb %al
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
-; X64-NEXT: andb $1, %dl
-; X64-NEXT: negb %dl
-; X64-NEXT: cmpb %al, %dl
-; X64-NEXT: setl %al
-; X64-NEXT: setg %dl
-; X64-NEXT: subb %al, %dl
-; X64-NEXT: movzbl %dl, %eax
-; X64-NEXT: andb $1, %cl
-; X64-NEXT: negb %cl
-; X64-NEXT: andb $1, %sil
-; X64-NEXT: negb %sil
-; X64-NEXT: cmpb %cl, %sil
-; X64-NEXT: setl %cl
-; X64-NEXT: setg %dl
-; X64-NEXT: subb %cl, %dl
-; X64-NEXT: movzbl %dl, %ecx
-; X64-NEXT: shll $8, %ecx
-; X64-NEXT: orl %eax, %ecx
-; X64-NEXT: movd %ecx, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: scmp_bool_operands:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: negb %al
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE2-NEXT: andb $1, %dl
+; SSE2-NEXT: negb %dl
+; SSE2-NEXT: cmpb %al, %dl
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %dl
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: movzbl %dl, %eax
+; SSE2-NEXT: andb $1, %cl
+; SSE2-NEXT: negb %cl
+; SSE2-NEXT: andb $1, %sil
+; SSE2-NEXT: negb %sil
+; SSE2-NEXT: cmpb %cl, %sil
+; SSE2-NEXT: setl %cl
+; SSE2-NEXT: setg %dl
+; SSE2-NEXT: subb %cl, %dl
+; SSE2-NEXT: movzbl %dl, %ecx
+; SSE2-NEXT: shll $8, %ecx
+; SSE2-NEXT: orl %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: scmp_bool_operands:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pextrb $8, %xmm1, %eax
+; SSE4-NEXT: andb $1, %al
+; SSE4-NEXT: negb %al
+; SSE4-NEXT: pextrb $8, %xmm0, %ecx
+; SSE4-NEXT: andb $1, %cl
+; SSE4-NEXT: negb %cl
+; SSE4-NEXT: cmpb %al, %cl
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movzbl %cl, %eax
+; SSE4-NEXT: movd %xmm1, %ecx
+; SSE4-NEXT: andb $1, %cl
+; SSE4-NEXT: negb %cl
+; SSE4-NEXT: movd %xmm0, %edx
+; SSE4-NEXT: andb $1, %dl
+; SSE4-NEXT: negb %dl
+; SSE4-NEXT: cmpb %cl, %dl
+; SSE4-NEXT: setl %cl
+; SSE4-NEXT: setg %dl
+; SSE4-NEXT: subb %cl, %dl
+; SSE4-NEXT: movzbl %dl, %ecx
+; SSE4-NEXT: movd %ecx, %xmm0
+; SSE4-NEXT: pinsrb $1, %eax, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX2-LABEL: scmp_bool_operands:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: negb %al
+; AVX2-NEXT: andb $1, %cl
+; AVX2-NEXT: negb %cl
+; AVX2-NEXT: cmpb %al, %cl
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %cl
+; AVX2-NEXT: subb %al, %cl
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: negb %al
+; AVX2-NEXT: vmovd %xmm0, %edx
+; AVX2-NEXT: andb $1, %dl
+; AVX2-NEXT: negb %dl
+; AVX2-NEXT: cmpb %al, %dl
+; AVX2-NEXT: setl %al
+; AVX2-NEXT: setg %dl
+; AVX2-NEXT: subb %al, %dl
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: scmp_bool_operands:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512-NEXT: vpmovq2m %xmm0, %k0
+; AVX512-NEXT: kshiftrb $1, %k0, %k1
+; AVX512-NEXT: kmovd %k1, %eax
+; AVX512-NEXT: vpsllq $63, %xmm1, %xmm0
+; AVX512-NEXT: vpmovq2m %xmm0, %k1
+; AVX512-NEXT: kshiftrb $1, %k1, %k2
+; AVX512-NEXT: kmovd %k2, %ecx
+; AVX512-NEXT: andb $1, %cl
+; AVX512-NEXT: negb %cl
+; AVX512-NEXT: andb $1, %al
+; AVX512-NEXT: negb %al
+; AVX512-NEXT: cmpb %cl, %al
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %cl
+; AVX512-NEXT: subb %al, %cl
+; AVX512-NEXT: kmovd %k1, %eax
+; AVX512-NEXT: andb $1, %al
+; AVX512-NEXT: negb %al
+; AVX512-NEXT: kmovd %k0, %edx
+; AVX512-NEXT: andb $1, %dl
+; AVX512-NEXT: negb %dl
+; AVX512-NEXT: cmpb %al, %dl
+; AVX512-NEXT: setl %al
+; AVX512-NEXT: setg %dl
+; AVX512-NEXT: subb %al, %dl
+; AVX512-NEXT: vmovd %edx, %xmm0
+; AVX512-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
+; AVX512-NEXT: retq
;
; X86-LABEL: scmp_bool_operands:
; X86: # %bb.0:
@@ -1677,27 +2636,67 @@ define <2 x i8> @scmp_bool_operands(<2 x i1> %x, <2 x i1> %y) nounwind {
}
define <2 x i16> @scmp_ret_wider_than_operands(<2 x i8> %x, <2 x i8> %y) nounwind {
-; X64-LABEL: scmp_ret_wider_than_operands:
-; X64: # %bb.0:
-; X64-NEXT: movd %xmm1, %eax
-; X64-NEXT: movl %eax, %ecx
-; X64-NEXT: shrl $8, %ecx
-; X64-NEXT: movd %xmm0, %edx
-; X64-NEXT: movl %edx, %esi
-; X64-NEXT: shrl $8, %esi
-; X64-NEXT: cmpb %cl, %sil
-; X64-NEXT: setl %cl
-; X64-NEXT: setg %sil
-; X64-NEXT: subb %cl, %sil
-; X64-NEXT: movsbl %sil, %ecx
-; X64-NEXT: cmpb %al, %dl
-; X64-NEXT: setl %al
-; X64-NEXT: setg %dl
-; X64-NEXT: subb %al, %dl
-; X64-NEXT: movsbl %dl, %eax
-; X64-NEXT: movd %eax, %xmm0
-; X64-NEXT: pinsrw $1, %ecx, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: scmp_ret_wider_than_operands:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movl %eax, %ecx
+; SSE2-NEXT: shrl $8, %ecx
+; SSE2-NEXT: movd %xmm0, %edx
+; SSE2-NEXT: movl %edx, %esi
+; SSE2-NEXT: shrl $8, %esi
+; SSE2-NEXT: cmpb %cl, %sil
+; SSE2-NEXT: setl %cl
+; SSE2-NEXT: setg %sil
+; SSE2-NEXT: subb %cl, %sil
+; SSE2-NEXT: movsbl %sil, %ecx
+; SSE2-NEXT: cmpb %al, %dl
+; SSE2-NEXT: setl %al
+; SSE2-NEXT: setg %dl
+; SSE2-NEXT: subb %al, %dl
+; SSE2-NEXT: movsbl %dl, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: pinsrw $1, %ecx, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: scmp_ret_wider_than_operands:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pextrb $1, %xmm1, %eax
+; SSE4-NEXT: pextrb $1, %xmm0, %ecx
+; SSE4-NEXT: cmpb %al, %cl
+; SSE4-NEXT: setl %al
+; SSE4-NEXT: setg %cl
+; SSE4-NEXT: subb %al, %cl
+; SSE4-NEXT: movsbl %cl, %eax
+; SSE4-NEXT: movd %xmm1, %ecx
+; SSE4-NEXT: movd %xmm0, %edx
+; SSE4-NEXT: cmpb %cl, %dl
+; SSE4-NEXT: setl %cl
+; SSE4-NEXT: setg %dl
+; SSE4-NEXT: subb %cl, %dl
+; SSE4-NEXT: movsbl %dl, %ecx
+; SSE4-NEXT: movd %ecx, %xmm0
+; SSE4-NEXT: pinsrw $1, %eax, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: scmp_ret_wider_than_operands:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrb $1, %xmm1, %eax
+; AVX-NEXT: vpextrb $1, %xmm0, %ecx
+; AVX-NEXT: cmpb %al, %cl
+; AVX-NEXT: setl %al
+; AVX-NEXT: setg %cl
+; AVX-NEXT: subb %al, %cl
+; AVX-NEXT: movsbl %cl, %eax
+; AVX-NEXT: vmovd %xmm1, %ecx
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: cmpb %cl, %dl
+; AVX-NEXT: setl %cl
+; AVX-NEXT: setg %dl
+; AVX-NEXT: subb %cl, %dl
+; AVX-NEXT: movsbl %dl, %ecx
+; AVX-NEXT: vmovd %ecx, %xmm0
+; AVX-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: retq
;
; X86-LABEL: scmp_ret_wider_than_operands:
; X86: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/ucmp.ll b/llvm/test/CodeGen/X86/ucmp.ll
index 8e7f9f551527b1..cd643cb8d63751 100644
--- a/llvm/test/CodeGen/X86/ucmp.ll
+++ b/llvm/test/CodeGen/X86/ucmp.ll
@@ -1,5 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefixes=X64,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=X64,SSE,SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=X64,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=X64,AVX,AVX512
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
define i8 @ucmp.8.8(i8 %x, i8 %y) nounwind {
@@ -247,15 +250,25 @@ define i4 @ucmp_narrow_result(i32 %x, i32 %y) nounwind {
}
define i8 @ucmp_narrow_op(i62 %x, i62 %y) nounwind {
-; X64-LABEL: ucmp_narrow_op:
-; X64: # %bb.0:
-; X64-NEXT: movabsq $4611686018427387903, %rax # imm = 0x3FFFFFFFFFFFFFFF
-; X64-NEXT: andq %rax, %rsi
-; X64-NEXT: andq %rax, %rdi
-; X64-NEXT: cmpq %rsi, %rdi
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: retq
+; SSE-LABEL: ucmp_narrow_op:
+; SSE: # %bb.0:
+; SSE-NEXT: movabsq $4611686018427387903, %rax # imm = 0x3FFFFFFFFFFFFFFF
+; SSE-NEXT: andq %rax, %rsi
+; SSE-NEXT: andq %rax, %rdi
+; SSE-NEXT: cmpq %rsi, %rdi
+; SSE-NEXT: seta %al
+; SSE-NEXT: sbbb $0, %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ucmp_narrow_op:
+; AVX: # %bb.0:
+; AVX-NEXT: movb $62, %al
+; AVX-NEXT: bzhiq %rax, %rsi, %rcx
+; AVX-NEXT: bzhiq %rax, %rdi, %rax
+; AVX-NEXT: cmpq %rcx, %rax
+; AVX-NEXT: seta %al
+; AVX-NEXT: sbbb $0, %al
+; AVX-NEXT: retq
;
; X86-LABEL: ucmp_narrow_op:
; X86: # %bb.0:
@@ -315,19 +328,33 @@ define i141 @ucmp_wide_result(i32 %x, i32 %y) nounwind {
}
define i8 @ucmp_wide_op(i109 %x, i109 %y) nounwind {
-; X64-LABEL: ucmp_wide_op:
-; X64: # %bb.0:
-; X64-NEXT: movabsq $35184372088831, %rax # imm = 0x1FFFFFFFFFFF
-; X64-NEXT: andq %rax, %rsi
-; X64-NEXT: andq %rax, %rcx
-; X64-NEXT: cmpq %rdi, %rdx
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: sbbq %rsi, %rax
-; X64-NEXT: setb %al
-; X64-NEXT: cmpq %rdx, %rdi
-; X64-NEXT: sbbq %rcx, %rsi
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: retq
+; SSE-LABEL: ucmp_wide_op:
+; SSE: # %bb.0:
+; SSE-NEXT: movabsq $35184372088831, %rax # imm = 0x1FFFFFFFFFFF
+; SSE-NEXT: andq %rax, %rsi
+; SSE-NEXT: andq %rax, %rcx
+; SSE-NEXT: cmpq %rdi, %rdx
+; SSE-NEXT: movq %rcx, %rax
+; SSE-NEXT: sbbq %rsi, %rax
+; SSE-NEXT: setb %al
+; SSE-NEXT: cmpq %rdx, %rdi
+; SSE-NEXT: sbbq %rcx, %rsi
+; SSE-NEXT: sbbb $0, %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ucmp_wide_op:
+; AVX: # %bb.0:
+; AVX-NEXT: movb $45, %al
+; AVX-NEXT: bzhiq %rax, %rsi, %rsi
+; AVX-NEXT: bzhiq %rax, %rcx, %rcx
+; AVX-NEXT: cmpq %rdi, %rdx
+; AVX-NEXT: movq %rcx, %rax
+; AVX-NEXT: sbbq %rsi, %rax
+; AVX-NEXT: setb %al
+; AVX-NEXT: cmpq %rdx, %rdi
+; AVX-NEXT: sbbq %rcx, %rsi
+; AVX-NEXT: sbbb $0, %al
+; AVX-NEXT: retq
;
; X86-LABEL: ucmp_wide_op:
; X86: # %bb.0:
@@ -394,17 +421,45 @@ define i41 @ucmp_uncommon_types(i7 %x, i7 %y) nounwind {
}
define <4 x i32> @ucmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind {
-; X64-LABEL: ucmp_normal_vectors:
-; X64: # %bb.0:
-; X64-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; X64-NEXT: pxor %xmm2, %xmm1
-; X64-NEXT: pxor %xmm2, %xmm0
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: pcmpgtd %xmm1, %xmm2
-; X64-NEXT: pcmpgtd %xmm0, %xmm1
-; X64-NEXT: psubd %xmm2, %xmm1
-; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: ucmp_normal_vectors:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: ucmp_normal_vectors:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: pmaxud %xmm1, %xmm2
+; SSE4-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE4-NEXT: pminud %xmm0, %xmm1
+; SSE4-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE4-NEXT: psubd %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX2-LABEL: ucmp_normal_vectors:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2
+; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ucmp_normal_vectors:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpcmpltud %xmm1, %xmm0, %k1
+; AVX512-NEXT: vpcmpnleud %xmm1, %xmm0, %k2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1]
+; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1}
+; AVX512-NEXT: retq
;
; X86-LABEL: ucmp_normal_vectors:
; X86: # %bb.0:
@@ -445,46 +500,107 @@ define <4 x i32> @ucmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind {
}
define <4 x i8> @ucmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind {
-; X64-LABEL: ucmp_narrow_vec_result:
-; X64: # %bb.0:
-; X64-NEXT: movd %xmm1, %eax
-; X64-NEXT: movd %xmm0, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; X64-NEXT: movd %xmm2, %ecx
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; X64-NEXT: movd %xmm2, %edx
-; X64-NEXT: cmpl %ecx, %edx
-; X64-NEXT: seta %cl
-; X64-NEXT: sbbb $0, %cl
-; X64-NEXT: movzbl %cl, %ecx
-; X64-NEXT: shll $8, %ecx
-; X64-NEXT: orl %eax, %ecx
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; X64-NEXT: movd %xmm2, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; X64-NEXT: movd %xmm2, %edx
-; X64-NEXT: cmpl %eax, %edx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: shll $16, %eax
-; X64-NEXT: orl %ecx, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; X64-NEXT: movd %xmm1, %ecx
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; X64-NEXT: movd %xmm0, %edx
-; X64-NEXT: cmpl %ecx, %edx
-; X64-NEXT: seta %cl
-; X64-NEXT: sbbb $0, %cl
-; X64-NEXT: movzbl %cl, %ecx
-; X64-NEXT: shll $24, %ecx
-; X64-NEXT: orl %eax, %ecx
-; X64-NEXT: movd %ecx, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: ucmp_narrow_vec_result:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: cmpl %eax, %ecx
+; SSE2-NEXT: seta %al
+; SSE2-NEXT: sbbb $0, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: cmpl %ecx, %edx
+; SSE2-NEXT: seta %cl
+; SSE2-NEXT: sbbb $0, %cl
+; SSE2-NEXT: movzbl %cl, %ecx
+; SSE2-NEXT: shll $8, %ecx
+; SSE2-NEXT: orl %eax, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; SSE2-NEXT: movd %xmm2, %edx
+; SSE2-NEXT: cmpl %eax, %edx
+; SSE2-NEXT: seta %al
+; SSE2-NEXT: sbbb $0, %al
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: shll $16, %eax
+; SSE2-NEXT: orl %ecx, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; SSE2-NEXT: movd %xmm1, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; SSE2-NEXT: movd %xmm0, %edx
+; SSE2-NEXT: cmpl %ecx, %edx
+; SSE2-NEXT: seta %cl
+; SSE2-NEXT: sbbb $0, %cl
+; SSE2-NEXT: movzbl %cl, %ecx
+; SSE2-NEXT: shll $24, %ecx
+; SSE2-NEXT: orl %eax, %ecx
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: ucmp_narrow_vec_result:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pextrd $1, %xmm1, %eax
+; SSE4-NEXT: pextrd $1, %xmm0, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: movd %xmm1, %ecx
+; SSE4-NEXT: movd %xmm0, %edx
+; SSE4-NEXT: cmpl %ecx, %edx
+; SSE4-NEXT: seta %cl
+; SSE4-NEXT: sbbb $0, %cl
+; SSE4-NEXT: movzbl %cl, %ecx
+; SSE4-NEXT: movd %ecx, %xmm2
+; SSE4-NEXT: pinsrb $1, %eax, %xmm2
+; SSE4-NEXT: pextrd $2, %xmm1, %eax
+; SSE4-NEXT: pextrd $2, %xmm0, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $2, %eax, %xmm2
+; SSE4-NEXT: pextrd $3, %xmm1, %eax
+; SSE4-NEXT: pextrd $3, %xmm0, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $3, %eax, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX-LABEL: ucmp_narrow_vec_result:
+; AVX: # %bb.0:
+; AVX-NEXT: vpextrd $1, %xmm1, %eax
+; AVX-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX-NEXT: cmpl %eax, %ecx
+; AVX-NEXT: seta %al
+; AVX-NEXT: sbbb $0, %al
+; AVX-NEXT: vmovd %xmm1, %ecx
+; AVX-NEXT: vmovd %xmm0, %edx
+; AVX-NEXT: cmpl %ecx, %edx
+; AVX-NEXT: seta %cl
+; AVX-NEXT: sbbb $0, %cl
+; AVX-NEXT: vmovd %ecx, %xmm2
+; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrd $2, %xmm1, %eax
+; AVX-NEXT: vpextrd $2, %xmm0, %ecx
+; AVX-NEXT: cmpl %eax, %ecx
+; AVX-NEXT: seta %al
+; AVX-NEXT: sbbb $0, %al
+; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX-NEXT: vpextrd $3, %xmm1, %eax
+; AVX-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX-NEXT: cmpl %eax, %ecx
+; AVX-NEXT: seta %al
+; AVX-NEXT: sbbb $0, %al
+; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm0
+; AVX-NEXT: retq
;
; X86-LABEL: ucmp_narrow_vec_result:
; X86: # %bb.0:
@@ -521,19 +637,50 @@ define <4 x i8> @ucmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind {
}
define <4 x i32> @ucmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind {
-; X64-LABEL: ucmp_narrow_vec_op:
-; X64: # %bb.0:
-; X64-NEXT: pxor %xmm2, %xmm2
-; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: pcmpgtd %xmm1, %xmm2
-; X64-NEXT: pcmpgtd %xmm0, %xmm1
-; X64-NEXT: psubd %xmm2, %xmm1
-; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: ucmp_narrow_vec_op:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: ucmp_narrow_vec_op:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE4-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE4-NEXT: psubd %xmm2, %xmm1
+; SSE4-NEXT: movdqa %xmm1, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX2-LABEL: ucmp_narrow_vec_op:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ucmp_narrow_vec_op:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
+; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1]
+; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1}
+; AVX512-NEXT: retq
;
; X86-LABEL: ucmp_narrow_vec_op:
; X86: # %bb.0:
@@ -574,45 +721,107 @@ define <4 x i32> @ucmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind {
}
define <16 x i32> @ucmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind {
-; X64-LABEL: ucmp_wide_vec_result:
-; X64: # %bb.0:
-; X64-NEXT: movdqa %xmm1, %xmm3
-; X64-NEXT: pxor %xmm5, %xmm5
-; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; X64-NEXT: movdqa %xmm1, %xmm4
-; X64-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; X64-NEXT: movdqa %xmm2, %xmm6
-; X64-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; X64-NEXT: movdqa %xmm6, %xmm7
-; X64-NEXT: pcmpgtd %xmm4, %xmm7
-; X64-NEXT: pcmpgtd %xmm6, %xmm4
-; X64-NEXT: psubd %xmm7, %xmm4
-; X64-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; X64-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; X64-NEXT: movdqa %xmm2, %xmm6
-; X64-NEXT: pcmpgtd %xmm1, %xmm6
-; X64-NEXT: pcmpgtd %xmm2, %xmm1
-; X64-NEXT: psubd %xmm6, %xmm1
-; X64-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
-; X64-NEXT: movdqa %xmm3, %xmm2
-; X64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
-; X64-NEXT: movdqa %xmm0, %xmm6
-; X64-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; X64-NEXT: movdqa %xmm6, %xmm7
-; X64-NEXT: pcmpgtd %xmm2, %xmm7
-; X64-NEXT: pcmpgtd %xmm6, %xmm2
-; X64-NEXT: psubd %xmm7, %xmm2
-; X64-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; X64-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; X64-NEXT: movdqa %xmm0, %xmm5
-; X64-NEXT: pcmpgtd %xmm3, %xmm5
-; X64-NEXT: pcmpgtd %xmm0, %xmm3
-; X64-NEXT: psubd %xmm5, %xmm3
-; X64-NEXT: movdqa %xmm4, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: ucmp_wide_vec_result:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
+; SSE2-NEXT: psubd %xmm7, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE2-NEXT: psubd %xmm6, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm2
+; SSE2-NEXT: psubd %xmm7, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE2-NEXT: psubd %xmm5, %xmm3
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: ucmp_wide_vec_result:
+; SSE4: # %bb.0:
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE4-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE4-NEXT: psubd %xmm3, %xmm0
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,1,1]
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: pcmpgtd %xmm5, %xmm3
+; SSE4-NEXT: pcmpgtd %xmm2, %xmm5
+; SSE4-NEXT: psubd %xmm3, %xmm5
+; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,3,2,3]
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; SSE4-NEXT: movdqa %xmm3, %xmm6
+; SSE4-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE4-NEXT: pcmpgtd %xmm3, %xmm2
+; SSE4-NEXT: psubd %xmm6, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3]
+; SSE4-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE4-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE4-NEXT: psubd %xmm4, %xmm3
+; SSE4-NEXT: movdqa %xmm5, %xmm1
+; SSE4-NEXT: retq
+;
+; AVX2-LABEL: ucmp_wide_vec_result:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vpcmpgtd %ymm2, %ymm3, %ymm4
+; AVX2-NEXT: vpcmpgtd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm3
+; AVX2-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm1
+; AVX2-NEXT: vmovdqa %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ucmp_wide_vec_result:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpcmpltub %xmm1, %xmm0, %k1
+; AVX512-NEXT: vpcmpnleub %xmm1, %xmm0, %k2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm0 {%k2} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
+; AVX512-NEXT: retq
;
; X86-LABEL: ucmp_wide_vec_result:
; X86: # %bb.0:
@@ -739,161 +948,408 @@ define <16 x i32> @ucmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind {
}
define <16 x i8> @ucmp_wide_vec_op(<16 x i32> %x, <16 x i32> %y) nounwind {
-; X64-LABEL: ucmp_wide_vec_op:
-; X64: # %bb.0:
-; X64-NEXT: pshufd {{.*#+}} xmm8 = xmm7[3,3,3,3]
-; X64-NEXT: movd %xmm8, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm8 = xmm3[3,3,3,3]
-; X64-NEXT: movd %xmm8, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm8
-; X64-NEXT: pshufd {{.*#+}} xmm9 = xmm7[2,3,2,3]
-; X64-NEXT: movd %xmm9, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm9 = xmm3[2,3,2,3]
-; X64-NEXT: movd %xmm9, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm9
-; X64-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; X64-NEXT: movd %xmm7, %eax
-; X64-NEXT: movd %xmm3, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm8
-; X64-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,1,1]
-; X64-NEXT: movd %xmm7, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
-; X64-NEXT: movd %xmm3, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm3
-; X64-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3],xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm6[3,3,3,3]
-; X64-NEXT: movd %xmm3, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm2[3,3,3,3]
-; X64-NEXT: movd %xmm3, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm3
-; X64-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
-; X64-NEXT: movd %xmm7, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,2,3]
-; X64-NEXT: movd %xmm7, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm7
-; X64-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
-; X64-NEXT: movd %xmm6, %eax
-; X64-NEXT: movd %xmm2, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm3
-; X64-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,1,1]
-; X64-NEXT: movd %xmm6, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
-; X64-NEXT: movd %xmm2, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm2
-; X64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
-; X64-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm5[3,3,3,3]
-; X64-NEXT: movd %xmm2, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; X64-NEXT: movd %xmm2, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm2
-; X64-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
-; X64-NEXT: movd %xmm6, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; X64-NEXT: movd %xmm6, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm6
-; X64-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; X64-NEXT: movd %xmm5, %eax
-; X64-NEXT: movd %xmm1, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm2
-; X64-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
-; X64-NEXT: movd %xmm5, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; X64-NEXT: movd %xmm1, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm1
-; X64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3]
-; X64-NEXT: movd %xmm1, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; X64-NEXT: movd %xmm1, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; X64-NEXT: movd %xmm5, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
-; X64-NEXT: movd %xmm5, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm5
-; X64-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; X64-NEXT: movd %xmm4, %eax
-; X64-NEXT: movd %xmm0, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm1
-; X64-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
-; X64-NEXT: movd %xmm4, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X64-NEXT: movd %xmm0, %ecx
-; X64-NEXT: cmpl %eax, %ecx
-; X64-NEXT: seta %al
-; X64-NEXT: sbbb $0, %al
-; X64-NEXT: movzbl %al, %eax
-; X64-NEXT: movd %eax, %xmm0
-; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: retq
+; SSE2-LABEL: ucmp_wide_vec_op:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[3,3,3,3]
+; SSE2-NEXT: movd %xmm8, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[3,3,3,3]
+; SSE2-NEXT: movd %xmm8, %ecx
+; SSE2-NEXT: cmpl %eax, %ecx
+; SSE2-NEXT: seta %al
+; SSE2-NEXT: sbbb $0, %al
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[2,3,2,3]
+; SSE2-NEXT: movd %xmm8, %ecx
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
+; SSE2-NEXT: movd %xmm8, %edx
+; SSE2-NEXT: cmpl %ecx, %edx
+; SSE2-NEXT: seta %cl
+; SSE2-NEXT: sbbb $0, %cl
+; SSE2-NEXT: movd %xmm7, %edx
+; SSE2-NEXT: movd %xmm3, %esi
+; SSE2-NEXT: cmpl %edx, %esi
+; SSE2-NEXT: seta %dl
+; SSE2-NEXT: sbbb $0, %dl
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,1,1]
+; SSE2-NEXT: movd %xmm7, %esi
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; SSE2-NEXT: movd %xmm3, %edi
+; SSE2-NEXT: cmpl %esi, %edi
+; SSE2-NEXT: seta %sil
+; SSE2-NEXT: movzbl %al, %eax
+; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: sbbb $0, %sil
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[3,3,3,3]
+; SSE2-NEXT: movd %xmm3, %edi
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[3,3,3,3]
+; SSE2-NEXT: movd %xmm3, %r8d
+; SSE2-NEXT: cmpl %edi, %r8d
+; SSE2-NEXT: seta %dil
+; SSE2-NEXT: sbbb $0, %dil
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
+; SSE2-NEXT: movd %xmm3, %r8d
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; SSE2-NEXT: movd %xmm3, %r9d
+; SSE2-NEXT: cmpl %r8d, %r9d
+; SSE2-NEXT: seta %r8b
+; SSE2-NEXT: movzbl %dl, %edx
+; SSE2-NEXT: sbbb $0, %r8b
+; SSE2-NEXT: movd %xmm6, %r9d
+; SSE2-NEXT: movd %xmm2, %r10d
+; SSE2-NEXT: cmpl %r9d, %r10d
+; SSE2-NEXT: seta %r9b
+; SSE2-NEXT: movzbl %sil, %esi
+; SSE2-NEXT: sbbb $0, %r9b
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,1,1]
+; SSE2-NEXT: movd %xmm3, %r10d
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
+; SSE2-NEXT: movd %xmm2, %r11d
+; SSE2-NEXT: cmpl %r10d, %r11d
+; SSE2-NEXT: seta %r10b
+; SSE2-NEXT: sbbb $0, %r10b
+; SSE2-NEXT: movzbl %dil, %edi
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[3,3,3,3]
+; SSE2-NEXT: movd %xmm2, %r11d
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; SSE2-NEXT: movd %xmm2, %ebx
+; SSE2-NEXT: cmpl %r11d, %ebx
+; SSE2-NEXT: seta %r11b
+; SSE2-NEXT: movzbl %r8b, %r8d
+; SSE2-NEXT: sbbb $0, %r11b
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; SSE2-NEXT: movd %xmm2, %ebx
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE2-NEXT: movd %xmm2, %ebp
+; SSE2-NEXT: cmpl %ebx, %ebp
+; SSE2-NEXT: seta %bpl
+; SSE2-NEXT: sbbb $0, %bpl
+; SSE2-NEXT: movzbl %r9b, %r9d
+; SSE2-NEXT: movd %xmm5, %ebx
+; SSE2-NEXT: movd %xmm1, %r14d
+; SSE2-NEXT: cmpl %ebx, %r14d
+; SSE2-NEXT: seta %r14b
+; SSE2-NEXT: sbbb $0, %r14b
+; SSE2-NEXT: movzbl %r10b, %r10d
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[1,1,1,1]
+; SSE2-NEXT: movd %xmm2, %ebx
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE2-NEXT: movd %xmm1, %r15d
+; SSE2-NEXT: cmpl %ebx, %r15d
+; SSE2-NEXT: seta %bl
+; SSE2-NEXT: movzbl %r11b, %r11d
+; SSE2-NEXT: sbbb $0, %bl
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3]
+; SSE2-NEXT: movd %xmm1, %r15d
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
+; SSE2-NEXT: movd %xmm1, %r12d
+; SSE2-NEXT: cmpl %r15d, %r12d
+; SSE2-NEXT: seta %r12b
+; SSE2-NEXT: sbbb $0, %r12b
+; SSE2-NEXT: movzbl %bpl, %ebp
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
+; SSE2-NEXT: movd %xmm1, %r15d
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: movd %xmm1, %r13d
+; SSE2-NEXT: cmpl %r15d, %r13d
+; SSE2-NEXT: seta %r13b
+; SSE2-NEXT: movzbl %r14b, %r15d
+; SSE2-NEXT: sbbb $0, %r13b
+; SSE2-NEXT: movd %xmm4, %r14d
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: cmpl %r14d, %eax
+; SSE2-NEXT: seta %r14b
+; SSE2-NEXT: sbbb $0, %r14b
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE2-NEXT: movd %xmm0, %ecx
+; SSE2-NEXT: cmpl %eax, %ecx
+; SSE2-NEXT: movzbl %bl, %eax
+; SSE2-NEXT: movzbl %r12b, %ecx
+; SSE2-NEXT: movzbl %r13b, %ebx
+; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Folded Reload
+; SSE2-NEXT: # xmm2 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd %edx, %xmm3
+; SSE2-NEXT: movd %esi, %xmm4
+; SSE2-NEXT: movd %edi, %xmm5
+; SSE2-NEXT: movd %r8d, %xmm6
+; SSE2-NEXT: movd %r9d, %xmm1
+; SSE2-NEXT: movd %r10d, %xmm7
+; SSE2-NEXT: movd %r11d, %xmm8
+; SSE2-NEXT: movd %ebp, %xmm9
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movd %r15d, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3],xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; SSE2-NEXT: movd %ebx, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: movzbl %r14b, %eax
+; SSE2-NEXT: seta %cl
+; SSE2-NEXT: sbbb $0, %cl
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movzbl %cl, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; SSE4-LABEL: ucmp_wide_vec_op:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pextrd $1, %xmm4, %eax
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pextrd $1, %xmm0, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: movd %xmm4, %ecx
+; SSE4-NEXT: movd %xmm0, %edx
+; SSE4-NEXT: cmpl %ecx, %edx
+; SSE4-NEXT: seta %cl
+; SSE4-NEXT: sbbb $0, %cl
+; SSE4-NEXT: movzbl %cl, %ecx
+; SSE4-NEXT: movd %ecx, %xmm0
+; SSE4-NEXT: pinsrb $1, %eax, %xmm0
+; SSE4-NEXT: pextrd $2, %xmm4, %eax
+; SSE4-NEXT: pextrd $2, %xmm8, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $2, %eax, %xmm0
+; SSE4-NEXT: pextrd $3, %xmm4, %eax
+; SSE4-NEXT: pextrd $3, %xmm8, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $3, %eax, %xmm0
+; SSE4-NEXT: movd %xmm5, %eax
+; SSE4-NEXT: movd %xmm1, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $4, %eax, %xmm0
+; SSE4-NEXT: pextrd $1, %xmm5, %eax
+; SSE4-NEXT: pextrd $1, %xmm1, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $5, %eax, %xmm0
+; SSE4-NEXT: pextrd $2, %xmm5, %eax
+; SSE4-NEXT: pextrd $2, %xmm1, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $6, %eax, %xmm0
+; SSE4-NEXT: pextrd $3, %xmm5, %eax
+; SSE4-NEXT: pextrd $3, %xmm1, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $7, %eax, %xmm0
+; SSE4-NEXT: movd %xmm6, %eax
+; SSE4-NEXT: movd %xmm2, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $8, %eax, %xmm0
+; SSE4-NEXT: pextrd $1, %xmm6, %eax
+; SSE4-NEXT: pextrd $1, %xmm2, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $9, %eax, %xmm0
+; SSE4-NEXT: pextrd $2, %xmm6, %eax
+; SSE4-NEXT: pextrd $2, %xmm2, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $10, %eax, %xmm0
+; SSE4-NEXT: pextrd $3, %xmm6, %eax
+; SSE4-NEXT: pextrd $3, %xmm2, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $11, %eax, %xmm0
+; SSE4-NEXT: movd %xmm7, %eax
+; SSE4-NEXT: movd %xmm3, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $12, %eax, %xmm0
+; SSE4-NEXT: pextrd $1, %xmm7, %eax
+; SSE4-NEXT: pextrd $1, %xmm3, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $13, %eax, %xmm0
+; SSE4-NEXT: pextrd $2, %xmm7, %eax
+; SSE4-NEXT: pextrd $2, %xmm3, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $14, %eax, %xmm0
+; SSE4-NEXT: pextrd $3, %xmm7, %eax
+; SSE4-NEXT: pextrd $3, %xmm3, %ecx
+; SSE4-NEXT: cmpl %eax, %ecx
+; SSE4-NEXT: seta %al
+; SSE4-NEXT: sbbb $0, %al
+; SSE4-NEXT: movzbl %al, %eax
+; SSE4-NEXT: pinsrb $15, %eax, %xmm0
+; SSE4-NEXT: retq
+;
+; AVX2-LABEL: ucmp_wide_vec_op:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpextrd $1, %xmm2, %eax
+; AVX2-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vmovd %xmm2, %ecx
+; AVX2-NEXT: vmovd %xmm0, %edx
+; AVX2-NEXT: cmpl %ecx, %edx
+; AVX2-NEXT: seta %cl
+; AVX2-NEXT: sbbb $0, %cl
+; AVX2-NEXT: vmovd %ecx, %xmm4
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4
+; AVX2-NEXT: vpextrd $2, %xmm2, %eax
+; AVX2-NEXT: vpextrd $2, %xmm0, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4
+; AVX2-NEXT: vpextrd $3, %xmm2, %eax
+; AVX2-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX2-NEXT: vmovd %xmm2, %eax
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4
+; AVX2-NEXT: vpextrd $1, %xmm2, %eax
+; AVX2-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4
+; AVX2-NEXT: vpextrd $2, %xmm2, %eax
+; AVX2-NEXT: vpextrd $2, %xmm0, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4
+; AVX2-NEXT: vpextrd $3, %xmm2, %eax
+; AVX2-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm4, %xmm0
+; AVX2-NEXT: vmovd %xmm3, %eax
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrd $1, %xmm3, %eax
+; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrd $2, %xmm3, %eax
+; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrd $3, %xmm3, %eax
+; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
+; AVX2-NEXT: vmovd %xmm2, %eax
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrd $1, %xmm2, %eax
+; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrd $2, %xmm2, %eax
+; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrd $3, %xmm2, %eax
+; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
+; AVX2-NEXT: cmpl %eax, %ecx
+; AVX2-NEXT: seta %al
+; AVX2-NEXT: sbbb $0, %al
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ucmp_wide_vec_op:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpcmpltud %zmm1, %zmm0, %k1
+; AVX512-NEXT: vpcmpnleud %zmm1, %zmm0, %k2
+; AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1}
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
;
; X86-LABEL: ucmp_wide_vec_op:
; X86: # %bb.0:
@@ -1014,411 +1470,1488 @@ define <16 x i8> @ucmp_wide_vec_op(<16 x i32> %x, <16 x i32> %y) nounwind {
}
define <17 x i2> @ucmp_uncommon_vectors(<17 x i71> %x, <17 x i71> %y) nounwind {
-; X64-LABEL: ucmp_uncommon_vectors:
-; X64: # %bb.0:
-; X64-NEXT: pushq %rbp
-; X64-NEXT: pushq %r15
-; X64-NEXT: pushq %r14
-; X64-NEXT: pushq %r13
-; X64-NEXT: pushq %r12
-; X64-NEXT: pushq %rbx
-; X64-NEXT: subq $120, %rsp
-; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: andl $127, %edx
-; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: andl $127, %r8d
-; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, (%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12
-; X64-NEXT: andl $127, %r12d
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14
-; X64-NEXT: andl $127, %r14d
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx
-; X64-NEXT: andl $127, %ebx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15
-; X64-NEXT: andl $127, %r15d
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp
-; X64-NEXT: andl $127, %ebp
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r11
-; X64-NEXT: andl $127, %r11d
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13
-; X64-NEXT: andl $127, %r13d
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; X64-NEXT: andl $127, %r10d
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; X64-NEXT: andl $127, %esi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi
-; X64-NEXT: andl $127, %edi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: andl $127, %eax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdx
-; X64-NEXT: andl $127, %edx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8
-; X64-NEXT: cmpq %r9, %r8
-; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: sbbq %rax, %rcx
-; X64-NEXT: setb %cl
-; X64-NEXT: cmpq %r8, %r9
-; X64-NEXT: sbbq %rdx, %rax
-; X64-NEXT: sbbb $0, %cl
-; X64-NEXT: movb %cl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
-; X64-NEXT: cmpq %rax, %rcx
-; X64-NEXT: movq %rdi, %rdx
-; X64-NEXT: sbbq %rsi, %rdx
-; X64-NEXT: setb %dl
-; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: sbbq %rdi, %rsi
-; X64-NEXT: sbbb $0, %dl
-; X64-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
-; X64-NEXT: cmpq %rax, %rcx
-; X64-NEXT: movq %r10, %rdx
-; X64-NEXT: sbbq %r13, %rdx
-; X64-NEXT: setb %dl
-; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: sbbq %r10, %r13
-; X64-NEXT: sbbb $0, %dl
-; X64-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
-; X64-NEXT: cmpq %rax, %rcx
-; X64-NEXT: movq %r11, %rdx
-; X64-NEXT: sbbq %rbp, %rdx
-; X64-NEXT: setb %dl
-; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: sbbq %r11, %rbp
-; X64-NEXT: sbbb $0, %dl
-; X64-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
-; X64-NEXT: cmpq %rax, %rcx
-; X64-NEXT: movq %r15, %rdx
-; X64-NEXT: sbbq %rbx, %rdx
-; X64-NEXT: setb %dl
-; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: sbbq %r15, %rbx
-; X64-NEXT: sbbb $0, %dl
-; X64-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
-; X64-NEXT: cmpq %rax, %rcx
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, %rdx
-; X64-NEXT: sbbq %r14, %rdx
-; X64-NEXT: setb %dl
-; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: sbbq %rsi, %r14
-; X64-NEXT: sbbb $0, %dl
-; X64-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdx
-; X64-NEXT: cmpq %rcx, %rdx
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: sbbq %r12, %rax
-; X64-NEXT: setb %r13b
-; X64-NEXT: cmpq %rdx, %rcx
-; X64-NEXT: sbbq %rsi, %r12
-; X64-NEXT: sbbb $0, %r13b
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; X64-NEXT: cmpq %rdx, %rsi
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; X64-NEXT: movq %rdi, %rcx
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; X64-NEXT: sbbq %rax, %rcx
-; X64-NEXT: setb %bpl
-; X64-NEXT: cmpq %rsi, %rdx
-; X64-NEXT: sbbq %rdi, %rax
-; X64-NEXT: sbbb $0, %bpl
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi
-; X64-NEXT: cmpq %rsi, %rdi
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; X64-NEXT: movq %rcx, %rdx
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; X64-NEXT: sbbq %rax, %rdx
-; X64-NEXT: setb %r11b
-; X64-NEXT: cmpq %rdi, %rsi
-; X64-NEXT: sbbq %rcx, %rax
-; X64-NEXT: sbbb $0, %r11b
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8
-; X64-NEXT: cmpq %rdi, %r8
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; X64-NEXT: movq %rcx, %rsi
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; X64-NEXT: sbbq %rax, %rsi
-; X64-NEXT: setb %sil
-; X64-NEXT: cmpq %r8, %rdi
-; X64-NEXT: sbbq %rcx, %rax
-; X64-NEXT: sbbb $0, %sil
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9
-; X64-NEXT: cmpq %r8, %r9
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; X64-NEXT: movq %rcx, %rdi
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; X64-NEXT: sbbq %rax, %rdi
-; X64-NEXT: setb %dil
-; X64-NEXT: cmpq %r9, %r8
-; X64-NEXT: sbbq %rcx, %rax
-; X64-NEXT: sbbb $0, %dil
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; X64-NEXT: cmpq %r9, %r10
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; X64-NEXT: movq %rcx, %r8
-; X64-NEXT: movq (%rsp), %rax # 8-byte Reload
-; X64-NEXT: sbbq %rax, %r8
-; X64-NEXT: setb %r8b
-; X64-NEXT: cmpq %r10, %r9
-; X64-NEXT: sbbq %rcx, %rax
-; X64-NEXT: sbbb $0, %r8b
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx
-; X64-NEXT: cmpq %r10, %rbx
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; X64-NEXT: movq %rcx, %r9
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; X64-NEXT: sbbq %rax, %r9
-; X64-NEXT: setb %r9b
-; X64-NEXT: cmpq %rbx, %r10
-; X64-NEXT: sbbq %rcx, %rax
-; X64-NEXT: sbbb $0, %r9b
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; X64-NEXT: cmpq %rax, %rbx
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; X64-NEXT: movq %rdx, %r10
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; X64-NEXT: sbbq %rcx, %r10
-; X64-NEXT: setb %r10b
-; X64-NEXT: cmpq %rbx, %rax
-; X64-NEXT: sbbq %rdx, %rcx
-; X64-NEXT: sbbb $0, %r10b
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; X64-NEXT: cmpq %rcx, %r14
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; X64-NEXT: sbbq %rax, %rbx
-; X64-NEXT: setb %bl
-; X64-NEXT: cmpq %r14, %rcx
-; X64-NEXT: sbbq %rdx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15
-; X64-NEXT: sbbb $0, %bl
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; X64-NEXT: cmpq %rcx, %r15
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; X64-NEXT: movq %rdx, %r14
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; X64-NEXT: sbbq %rax, %r14
-; X64-NEXT: setb %r14b
-; X64-NEXT: cmpq %r15, %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12
-; X64-NEXT: sbbq %rdx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; X64-NEXT: sbbb $0, %r14b
-; X64-NEXT: cmpq %r12, %rax
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; X64-NEXT: movq %rdx, %r15
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; X64-NEXT: sbbq %rcx, %r15
-; X64-NEXT: setb %r15b
-; X64-NEXT: cmpq %rax, %r12
-; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
-; X64-NEXT: movd %eax, %xmm0
-; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
-; X64-NEXT: movd %eax, %xmm1
-; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
-; X64-NEXT: movd %eax, %xmm2
-; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
-; X64-NEXT: movd %eax, %xmm3
-; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
-; X64-NEXT: movd %eax, %xmm4
-; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
-; X64-NEXT: movd %eax, %xmm5
-; X64-NEXT: movzbl %r13b, %eax
-; X64-NEXT: movd %eax, %xmm6
-; X64-NEXT: movzbl %bpl, %eax
-; X64-NEXT: movd %eax, %xmm7
-; X64-NEXT: movzbl %r11b, %eax
-; X64-NEXT: movd %eax, %xmm8
-; X64-NEXT: movzbl %sil, %eax
-; X64-NEXT: movd %eax, %xmm9
-; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: movd %eax, %xmm10
-; X64-NEXT: movzbl %r8b, %eax
-; X64-NEXT: movd %eax, %xmm11
-; X64-NEXT: movzbl %r9b, %eax
-; X64-NEXT: movd %eax, %xmm12
-; X64-NEXT: movzbl %r10b, %eax
-; X64-NEXT: movd %eax, %xmm13
-; X64-NEXT: movzbl %bl, %eax
-; X64-NEXT: movd %eax, %xmm14
-; X64-NEXT: movzbl %r14b, %eax
-; X64-NEXT: movd %eax, %xmm15
-; X64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; X64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; X64-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; X64-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; X64-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
-; X64-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; X64-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3]
-; X64-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; X64-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; X64-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3]
-; X64-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
-; X64-NEXT: punpcklqdq {{.*#+}} xmm15 = xmm15[0],xmm7[0]
-; X64-NEXT: sbbq %rdx, %rcx
-; X64-NEXT: sbbb $0, %r15b
-; X64-NEXT: movzbl %r15b, %eax
-; X64-NEXT: andl $3, %eax
-; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; X64-NEXT: movb %al, 4(%rdi)
-; X64-NEXT: movdqa %xmm15, -{{[0-9]+}}(%rsp)
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT: andl $3, %eax
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; X64-NEXT: andl $3, %ecx
-; X64-NEXT: leaq (%rcx,%rax,4), %rax
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; X64-NEXT: andl $3, %ecx
-; X64-NEXT: shll $4, %ecx
-; X64-NEXT: orq %rax, %rcx
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT: andl $3, %eax
-; X64-NEXT: shll $6, %eax
-; X64-NEXT: orq %rcx, %rax
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; X64-NEXT: andl $3, %ecx
-; X64-NEXT: shll $8, %ecx
-; X64-NEXT: orq %rax, %rcx
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT: andl $3, %eax
-; X64-NEXT: shll $10, %eax
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; X64-NEXT: andl $3, %edx
-; X64-NEXT: shll $12, %edx
-; X64-NEXT: orq %rax, %rdx
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
-; X64-NEXT: andl $3, %esi
-; X64-NEXT: shll $14, %esi
-; X64-NEXT: orq %rdx, %rsi
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT: andl $3, %eax
-; X64-NEXT: shll $16, %eax
-; X64-NEXT: orq %rsi, %rax
-; X64-NEXT: orq %rcx, %rax
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; X64-NEXT: andl $3, %ecx
-; X64-NEXT: shll $18, %ecx
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; X64-NEXT: andl $3, %edx
-; X64-NEXT: shll $20, %edx
-; X64-NEXT: orq %rcx, %rdx
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; X64-NEXT: andl $3, %ecx
-; X64-NEXT: shll $22, %ecx
-; X64-NEXT: orq %rdx, %rcx
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; X64-NEXT: andl $3, %edx
-; X64-NEXT: shll $24, %edx
-; X64-NEXT: orq %rcx, %rdx
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; X64-NEXT: andl $3, %ecx
-; X64-NEXT: shlq $26, %rcx
-; X64-NEXT: orq %rdx, %rcx
-; X64-NEXT: orq %rax, %rcx
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; X64-NEXT: andl $3, %eax
-; X64-NEXT: shlq $28, %rax
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; X64-NEXT: andl $3, %edx
-; X64-NEXT: shlq $30, %rdx
-; X64-NEXT: orq %rax, %rdx
-; X64-NEXT: orq %rcx, %rdx
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movl %edx, (%rdi)
-; X64-NEXT: addq $120, %rsp
-; X64-NEXT: popq %rbx
-; X64-NEXT: popq %r12
-; X64-NEXT: popq %r13
-; X64-NEXT: popq %r14
-; X64-NEXT: popq %r15
-; X64-NEXT: popq %rbp
-; X64-NEXT: retq
+; SSE4-LABEL: ucmp_uncommon_vectors:
+; SSE4: # %bb.0:
+; SSE4-NEXT: pushq %rbp
+; SSE4-NEXT: pushq %r15
+; SSE4-NEXT: pushq %r14
+; SSE4-NEXT: pushq %r13
+; SSE4-NEXT: pushq %r12
+; SSE4-NEXT: pushq %rbx
+; SSE4-NEXT: subq $120, %rsp
+; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: andl $127, %edx
+; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: andl $127, %r8d
+; SSE4-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; SSE4-NEXT: andl $127, %r10d
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: andl $127, %eax
+; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT: andl $127, %ecx
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE4-NEXT: andl $127, %r8d
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; SSE4-NEXT: andl $127, %ebx
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE4-NEXT: andl $127, %edx
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; SSE4-NEXT: andl $127, %r13d
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; SSE4-NEXT: andl $127, %r11d
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE4-NEXT: andl $127, %r14d
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; SSE4-NEXT: andl $127, %r12d
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rbp
+; SSE4-NEXT: cmpq %rax, %rbp
+; SSE4-NEXT: movq %r12, %r15
+; SSE4-NEXT: sbbq %r14, %r15
+; SSE4-NEXT: setb %r15b
+; SSE4-NEXT: cmpq %rbp, %rax
+; SSE4-NEXT: sbbq %r12, %r14
+; SSE4-NEXT: sbbb $0, %r15b
+; SSE4-NEXT: movb %r15b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE4-NEXT: cmpq %rax, %r14
+; SSE4-NEXT: movq %r11, %r15
+; SSE4-NEXT: sbbq %r13, %r15
+; SSE4-NEXT: setb %bpl
+; SSE4-NEXT: cmpq %r14, %rax
+; SSE4-NEXT: sbbq %r11, %r13
+; SSE4-NEXT: sbbb $0, %bpl
+; SSE4-NEXT: movb %bpl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; SSE4-NEXT: cmpq %rax, %r11
+; SSE4-NEXT: movq %rdx, %r14
+; SSE4-NEXT: sbbq %rbx, %r14
+; SSE4-NEXT: setb %bpl
+; SSE4-NEXT: cmpq %r11, %rax
+; SSE4-NEXT: sbbq %rdx, %rbx
+; SSE4-NEXT: sbbb $0, %bpl
+; SSE4-NEXT: movb %bpl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE4-NEXT: cmpq %rax, %rdx
+; SSE4-NEXT: movq %r8, %r11
+; SSE4-NEXT: sbbq %rcx, %r11
+; SSE4-NEXT: setb %r11b
+; SSE4-NEXT: cmpq %rdx, %rax
+; SSE4-NEXT: sbbq %r8, %rcx
+; SSE4-NEXT: sbbb $0, %r11b
+; SSE4-NEXT: movb %r11b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT: cmpq %rax, %rcx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: movq %r8, %rdx
+; SSE4-NEXT: sbbq %r10, %rdx
+; SSE4-NEXT: setb %dl
+; SSE4-NEXT: cmpq %rcx, %rax
+; SSE4-NEXT: sbbq %r8, %r10
+; SSE4-NEXT: sbbb $0, %dl
+; SSE4-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT: cmpq %rax, %rcx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; SSE4-NEXT: movq %r11, %rdx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: sbbq %r8, %rdx
+; SSE4-NEXT: setb %r10b
+; SSE4-NEXT: cmpq %rcx, %rax
+; SSE4-NEXT: sbbq %r11, %r8
+; SSE4-NEXT: sbbb $0, %r10b
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT: cmpq %rax, %rcx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; SSE4-NEXT: movq %r11, %rdx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: sbbq %r8, %rdx
+; SSE4-NEXT: setb %dl
+; SSE4-NEXT: cmpq %rcx, %rax
+; SSE4-NEXT: sbbq %r11, %r8
+; SSE4-NEXT: sbbb $0, %dl
+; SSE4-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT: cmpq %rax, %rcx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; SSE4-NEXT: movq %r11, %rdx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: sbbq %r8, %rdx
+; SSE4-NEXT: setb %bpl
+; SSE4-NEXT: cmpq %rcx, %rax
+; SSE4-NEXT: sbbq %r11, %r8
+; SSE4-NEXT: sbbb $0, %bpl
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT: cmpq %rax, %rcx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; SSE4-NEXT: movq %r11, %rdx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: sbbq %r8, %rdx
+; SSE4-NEXT: setb %dl
+; SSE4-NEXT: cmpq %rcx, %rax
+; SSE4-NEXT: sbbq %r11, %r8
+; SSE4-NEXT: sbbb $0, %dl
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT: cmpq %rax, %rcx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; SSE4-NEXT: movq %r14, %r8
+; SSE4-NEXT: movq (%rsp), %rbx # 8-byte Reload
+; SSE4-NEXT: sbbq %rbx, %r8
+; SSE4-NEXT: setb %r11b
+; SSE4-NEXT: cmpq %rcx, %rax
+; SSE4-NEXT: sbbq %r14, %rbx
+; SSE4-NEXT: sbbb $0, %r11b
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE4-NEXT: cmpq %rax, %rcx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; SSE4-NEXT: movq %r14, %rbx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: sbbq %r8, %rbx
+; SSE4-NEXT: setb %bl
+; SSE4-NEXT: cmpq %rcx, %rax
+; SSE4-NEXT: sbbq %r14, %r8
+; SSE4-NEXT: sbbb $0, %bl
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE4-NEXT: cmpq %rax, %r14
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE4-NEXT: movq %r15, %rcx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: sbbq %r8, %rcx
+; SSE4-NEXT: setb %cl
+; SSE4-NEXT: cmpq %r14, %rax
+; SSE4-NEXT: sbbq %r15, %r8
+; SSE4-NEXT: sbbb $0, %cl
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; SSE4-NEXT: cmpq %rax, %r15
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; SSE4-NEXT: movq %r12, %r14
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: sbbq %r8, %r14
+; SSE4-NEXT: setb %r14b
+; SSE4-NEXT: cmpq %r15, %rax
+; SSE4-NEXT: sbbq %r12, %r8
+; SSE4-NEXT: sbbb $0, %r14b
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: cmpq %r9, %rax
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; SSE4-NEXT: movq %r12, %r15
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: sbbq %r8, %r15
+; SSE4-NEXT: setb %r15b
+; SSE4-NEXT: cmpq %rax, %r9
+; SSE4-NEXT: sbbq %r12, %r8
+; SSE4-NEXT: sbbb $0, %r15b
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; SSE4-NEXT: cmpq %r12, %rax
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; SSE4-NEXT: movq %r13, %r9
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: sbbq %r8, %r9
+; SSE4-NEXT: setb %r9b
+; SSE4-NEXT: cmpq %rax, %r12
+; SSE4-NEXT: sbbq %r13, %r8
+; SSE4-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; SSE4-NEXT: sbbb $0, %r9b
+; SSE4-NEXT: cmpq %rsi, %r12
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: movq %r8, %rdi
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: sbbq %rax, %rdi
+; SSE4-NEXT: setb %dil
+; SSE4-NEXT: cmpq %r12, %rsi
+; SSE4-NEXT: sbbq %r8, %rax
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; SSE4-NEXT: sbbb $0, %dil
+; SSE4-NEXT: cmpq %r12, %r13
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE4-NEXT: movq %r8, %rsi
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: sbbq %rax, %rsi
+; SSE4-NEXT: setb %sil
+; SSE4-NEXT: cmpq %r13, %r12
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload
+; SSE4-NEXT: movd %r12d, %xmm1
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload
+; SSE4-NEXT: movd %r12d, %xmm2
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload
+; SSE4-NEXT: movd %r12d, %xmm3
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload
+; SSE4-NEXT: movd %r12d, %xmm4
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload
+; SSE4-NEXT: movd %r12d, %xmm5
+; SSE4-NEXT: movzbl %r10b, %r10d
+; SSE4-NEXT: movd %r10d, %xmm6
+; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
+; SSE4-NEXT: movd %r10d, %xmm7
+; SSE4-NEXT: movzbl %bpl, %r10d
+; SSE4-NEXT: movd %r10d, %xmm0
+; SSE4-NEXT: movzbl %dl, %edx
+; SSE4-NEXT: movd %edx, %xmm8
+; SSE4-NEXT: movzbl %r11b, %edx
+; SSE4-NEXT: movd %edx, %xmm9
+; SSE4-NEXT: movzbl %bl, %edx
+; SSE4-NEXT: movd %edx, %xmm10
+; SSE4-NEXT: movzbl %cl, %ecx
+; SSE4-NEXT: movd %ecx, %xmm11
+; SSE4-NEXT: movzbl %r14b, %ecx
+; SSE4-NEXT: movd %ecx, %xmm12
+; SSE4-NEXT: movzbl %r15b, %ecx
+; SSE4-NEXT: movd %ecx, %xmm13
+; SSE4-NEXT: movzbl %r9b, %ecx
+; SSE4-NEXT: movd %ecx, %xmm14
+; SSE4-NEXT: movzbl %dil, %ecx
+; SSE4-NEXT: movd %ecx, %xmm15
+; SSE4-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE4-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE4-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE4-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; SSE4-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; SSE4-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; SSE4-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE4-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; SSE4-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; SSE4-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3]
+; SSE4-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; SSE4-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; SSE4-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3]
+; SSE4-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
+; SSE4-NEXT: sbbq %r8, %rax
+; SSE4-NEXT: sbbb $0, %sil
+; SSE4-NEXT: punpcklqdq {{.*#+}} xmm15 = xmm15[0],xmm0[0]
+; SSE4-NEXT: movzbl %sil, %ecx
+; SSE4-NEXT: andl $3, %ecx
+; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE4-NEXT: movb %cl, 4(%rax)
+; SSE4-NEXT: movdqa %xmm15, -{{[0-9]+}}(%rsp)
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE4-NEXT: andl $3, %ecx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE4-NEXT: andl $3, %edx
+; SSE4-NEXT: leaq (%rdx,%rcx,4), %rcx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE4-NEXT: andl $3, %edx
+; SSE4-NEXT: shll $4, %edx
+; SSE4-NEXT: orq %rcx, %rdx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE4-NEXT: andl $3, %ecx
+; SSE4-NEXT: shll $6, %ecx
+; SSE4-NEXT: orq %rdx, %rcx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE4-NEXT: andl $3, %edx
+; SSE4-NEXT: shll $8, %edx
+; SSE4-NEXT: orq %rcx, %rdx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE4-NEXT: andl $3, %ecx
+; SSE4-NEXT: shll $10, %ecx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE4-NEXT: andl $3, %esi
+; SSE4-NEXT: shll $12, %esi
+; SSE4-NEXT: orq %rcx, %rsi
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE4-NEXT: andl $3, %edi
+; SSE4-NEXT: shll $14, %edi
+; SSE4-NEXT: orq %rsi, %rdi
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE4-NEXT: andl $3, %ecx
+; SSE4-NEXT: shll $16, %ecx
+; SSE4-NEXT: orq %rdi, %rcx
+; SSE4-NEXT: orq %rdx, %rcx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE4-NEXT: andl $3, %edx
+; SSE4-NEXT: shll $18, %edx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE4-NEXT: andl $3, %esi
+; SSE4-NEXT: shll $20, %esi
+; SSE4-NEXT: orq %rdx, %rsi
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE4-NEXT: andl $3, %edx
+; SSE4-NEXT: shll $22, %edx
+; SSE4-NEXT: orq %rsi, %rdx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE4-NEXT: andl $3, %esi
+; SSE4-NEXT: shll $24, %esi
+; SSE4-NEXT: orq %rdx, %rsi
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE4-NEXT: andl $3, %edx
+; SSE4-NEXT: shlq $26, %rdx
+; SSE4-NEXT: orq %rsi, %rdx
+; SSE4-NEXT: orq %rcx, %rdx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE4-NEXT: andl $3, %ecx
+; SSE4-NEXT: shlq $28, %rcx
+; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE4-NEXT: andl $3, %esi
+; SSE4-NEXT: shlq $30, %rsi
+; SSE4-NEXT: orq %rcx, %rsi
+; SSE4-NEXT: orq %rdx, %rsi
+; SSE4-NEXT: movl %esi, (%rax)
+; SSE4-NEXT: addq $120, %rsp
+; SSE4-NEXT: popq %rbx
+; SSE4-NEXT: popq %r12
+; SSE4-NEXT: popq %r13
+; SSE4-NEXT: popq %r14
+; SSE4-NEXT: popq %r15
+; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: retq
+;
+; SSE2-LABEL: ucmp_uncommon_vectors:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbp
+; SSE2-NEXT: pushq %r15
+; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: pushq %r12
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: subq $88, %rsp
+; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: andl $127, %r8d
+; SSE2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: andl $127, %edx
+; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: andl $127, %ecx
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: andl $127, %eax
+; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; SSE2-NEXT: andl $127, %ebx
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE2-NEXT: andl $127, %edx
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; SSE2-NEXT: andl $127, %r10d
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE2-NEXT: andl $127, %r14d
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rbp
+; SSE2-NEXT: andl $127, %ebp
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; SSE2-NEXT: andl $127, %r13d
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; SSE2-NEXT: andl $127, %r11d
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; SSE2-NEXT: andl $127, %r15d
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; SSE2-NEXT: cmpq %rax, %r12
+; SSE2-NEXT: movq %r15, %r8
+; SSE2-NEXT: sbbq %r11, %r8
+; SSE2-NEXT: setb %r8b
+; SSE2-NEXT: cmpq %r12, %rax
+; SSE2-NEXT: sbbq %r15, %r11
+; SSE2-NEXT: sbbb $0, %r8b
+; SSE2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE2-NEXT: cmpq %rax, %r8
+; SSE2-NEXT: movq %r13, %r11
+; SSE2-NEXT: sbbq %rbp, %r11
+; SSE2-NEXT: setb %r11b
+; SSE2-NEXT: cmpq %r8, %rax
+; SSE2-NEXT: sbbq %r13, %rbp
+; SSE2-NEXT: sbbb $0, %r11b
+; SSE2-NEXT: movb %r11b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE2-NEXT: cmpq %rax, %r8
+; SSE2-NEXT: movq %r14, %r11
+; SSE2-NEXT: sbbq %r10, %r11
+; SSE2-NEXT: setb %r11b
+; SSE2-NEXT: cmpq %r8, %rax
+; SSE2-NEXT: sbbq %r14, %r10
+; SSE2-NEXT: sbbb $0, %r11b
+; SSE2-NEXT: movb %r11b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE2-NEXT: cmpq %rax, %r8
+; SSE2-NEXT: movq %rdx, %r10
+; SSE2-NEXT: sbbq %rbx, %r10
+; SSE2-NEXT: setb %r10b
+; SSE2-NEXT: cmpq %r8, %rax
+; SSE2-NEXT: sbbq %rdx, %rbx
+; SSE2-NEXT: sbbb $0, %r10b
+; SSE2-NEXT: movb %r10b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE2-NEXT: cmpq %rax, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE2-NEXT: movq %r10, %r8
+; SSE2-NEXT: sbbq %rcx, %r8
+; SSE2-NEXT: setb %r8b
+; SSE2-NEXT: cmpq %rdx, %rax
+; SSE2-NEXT: sbbq %r10, %rcx
+; SSE2-NEXT: sbbb $0, %r8b
+; SSE2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: cmpq %rax, %rcx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE2-NEXT: movq %r10, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE2-NEXT: sbbq %r8, %rdx
+; SSE2-NEXT: setb %dl
+; SSE2-NEXT: cmpq %rcx, %rax
+; SSE2-NEXT: sbbq %r10, %r8
+; SSE2-NEXT: sbbb $0, %dl
+; SSE2-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: cmpq %rax, %rcx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE2-NEXT: movq %r10, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; SSE2-NEXT: sbbq %r8, %rdx
+; SSE2-NEXT: setb %dl
+; SSE2-NEXT: cmpq %rcx, %rax
+; SSE2-NEXT: sbbq %r10, %r8
+; SSE2-NEXT: sbbb $0, %dl
+; SSE2-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: cmpq %rax, %rcx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; SSE2-NEXT: movq %r11, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE2-NEXT: sbbq %r10, %rdx
+; SSE2-NEXT: setb %r8b
+; SSE2-NEXT: cmpq %rcx, %rax
+; SSE2-NEXT: sbbq %r11, %r10
+; SSE2-NEXT: sbbb $0, %r8b
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: cmpq %rax, %rcx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE2-NEXT: movq %rbx, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE2-NEXT: sbbq %r10, %rdx
+; SSE2-NEXT: setb %r11b
+; SSE2-NEXT: cmpq %rcx, %rax
+; SSE2-NEXT: sbbq %rbx, %r10
+; SSE2-NEXT: sbbb $0, %r11b
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: cmpq %rax, %rcx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE2-NEXT: movq %rbx, %rdx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE2-NEXT: sbbq %r10, %rdx
+; SSE2-NEXT: setb %dl
+; SSE2-NEXT: cmpq %rcx, %rax
+; SSE2-NEXT: sbbq %rbx, %r10
+; SSE2-NEXT: sbbb $0, %dl
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: cmpq %rax, %rcx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; SSE2-NEXT: movq %r14, %r10
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; SSE2-NEXT: sbbq %rbx, %r10
+; SSE2-NEXT: setb %r10b
+; SSE2-NEXT: cmpq %rcx, %rax
+; SSE2-NEXT: sbbq %r14, %rbx
+; SSE2-NEXT: sbbb $0, %r10b
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; SSE2-NEXT: cmpq %rax, %rbx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE2-NEXT: movq %r15, %rcx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; SSE2-NEXT: sbbq %r14, %rcx
+; SSE2-NEXT: setb %cl
+; SSE2-NEXT: cmpq %rbx, %rax
+; SSE2-NEXT: sbbq %r15, %r14
+; SSE2-NEXT: sbbb $0, %cl
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE2-NEXT: cmpq %rax, %r14
+; SSE2-NEXT: movq (%rsp), %r12 # 8-byte Reload
+; SSE2-NEXT: movq %r12, %rbx
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE2-NEXT: sbbq %r15, %rbx
+; SSE2-NEXT: setb %bl
+; SSE2-NEXT: cmpq %r14, %rax
+; SSE2-NEXT: sbbq %r12, %r15
+; SSE2-NEXT: sbbb $0, %bl
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: cmpq %r9, %rax
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; SSE2-NEXT: movq %r12, %r14
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE2-NEXT: sbbq %r15, %r14
+; SSE2-NEXT: setb %bpl
+; SSE2-NEXT: cmpq %rax, %r9
+; SSE2-NEXT: sbbq %r12, %r15
+; SSE2-NEXT: sbbb $0, %bpl
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; SSE2-NEXT: cmpq %rsi, %rax
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE2-NEXT: movq %r15, %r9
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; SSE2-NEXT: sbbq %r14, %r9
+; SSE2-NEXT: setb %r9b
+; SSE2-NEXT: cmpq %rax, %rsi
+; SSE2-NEXT: sbbq %r15, %r14
+; SSE2-NEXT: movq %rdi, %rax
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; SSE2-NEXT: sbbb $0, %r9b
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE2-NEXT: cmpq %r15, %rsi
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; SSE2-NEXT: movq %r12, %rdi
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; SSE2-NEXT: sbbq %r14, %rdi
+; SSE2-NEXT: setb %dil
+; SSE2-NEXT: cmpq %rsi, %r15
+; SSE2-NEXT: sbbq %r12, %r14
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE2-NEXT: sbbb $0, %dil
+; SSE2-NEXT: cmpq %rsi, %r14
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; SSE2-NEXT: movq %r13, %r15
+; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; SSE2-NEXT: sbbq %r12, %r15
+; SSE2-NEXT: setb %r15b
+; SSE2-NEXT: cmpq %r14, %rsi
+; SSE2-NEXT: sbbq %r13, %r12
+; SSE2-NEXT: sbbb $0, %r15b
+; SSE2-NEXT: movzbl %r15b, %esi
+; SSE2-NEXT: andl $3, %esi
+; SSE2-NEXT: movb %sil, 4(%rax)
+; SSE2-NEXT: movzbl %dil, %esi
+; SSE2-NEXT: movzbl %r9b, %edi
+; SSE2-NEXT: andl $3, %esi
+; SSE2-NEXT: andl $3, %edi
+; SSE2-NEXT: leaq (%rdi,%rsi,4), %rsi
+; SSE2-NEXT: movzbl %bpl, %edi
+; SSE2-NEXT: andl $3, %edi
+; SSE2-NEXT: shll $4, %edi
+; SSE2-NEXT: orq %rsi, %rdi
+; SSE2-NEXT: movzbl %bl, %r9d
+; SSE2-NEXT: andl $3, %r9d
+; SSE2-NEXT: shll $6, %r9d
+; SSE2-NEXT: orq %rdi, %r9
+; SSE2-NEXT: movzbl %cl, %esi
+; SSE2-NEXT: andl $3, %esi
+; SSE2-NEXT: shll $8, %esi
+; SSE2-NEXT: orq %r9, %rsi
+; SSE2-NEXT: movzbl %dl, %ecx
+; SSE2-NEXT: movzbl %r10b, %edx
+; SSE2-NEXT: andl $3, %edx
+; SSE2-NEXT: shll $10, %edx
+; SSE2-NEXT: andl $3, %ecx
+; SSE2-NEXT: shll $12, %ecx
+; SSE2-NEXT: orq %rdx, %rcx
+; SSE2-NEXT: movzbl %r11b, %edx
+; SSE2-NEXT: andl $3, %edx
+; SSE2-NEXT: shll $14, %edx
+; SSE2-NEXT: orq %rcx, %rdx
+; SSE2-NEXT: movzbl %r8b, %ecx
+; SSE2-NEXT: andl $3, %ecx
+; SSE2-NEXT: shll $16, %ecx
+; SSE2-NEXT: orq %rdx, %rcx
+; SSE2-NEXT: orq %rsi, %rcx
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload
+; SSE2-NEXT: andl $3, %esi
+; SSE2-NEXT: shll $18, %esi
+; SSE2-NEXT: andl $3, %edx
+; SSE2-NEXT: shll $20, %edx
+; SSE2-NEXT: orq %rsi, %rdx
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload
+; SSE2-NEXT: andl $3, %esi
+; SSE2-NEXT: shll $22, %esi
+; SSE2-NEXT: orq %rdx, %rsi
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload
+; SSE2-NEXT: andl $3, %edx
+; SSE2-NEXT: shll $24, %edx
+; SSE2-NEXT: orq %rsi, %rdx
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload
+; SSE2-NEXT: andl $3, %esi
+; SSE2-NEXT: shlq $26, %rsi
+; SSE2-NEXT: orq %rdx, %rsi
+; SSE2-NEXT: orq %rcx, %rsi
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload
+; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload
+; SSE2-NEXT: andl $3, %edx
+; SSE2-NEXT: shlq $28, %rdx
+; SSE2-NEXT: andl $3, %ecx
+; SSE2-NEXT: shlq $30, %rcx
+; SSE2-NEXT: orq %rdx, %rcx
+; SSE2-NEXT: orq %rsi, %rcx
+; SSE2-NEXT: movl %ecx, (%rax)
+; SSE2-NEXT: addq $88, %rsp
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: popq %r13
+; SSE2-NEXT: popq %r14
+; SSE2-NEXT: popq %r15
+; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: ucmp_uncommon_vectors:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: subq $88, %rsp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: andl $127, %r8d
+; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: andl $127, %edx
+; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; AVX2-NEXT: andl $127, %r15d
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: andl $127, %eax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; AVX2-NEXT: andl $127, %r14d
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: andl $127, %edx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbp
+; AVX2-NEXT: andl $127, %ebp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX2-NEXT: andl $127, %r8d
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX2-NEXT: andl $127, %r12d
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX2-NEXT: andl $127, %r13d
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: cmpq %rbx, %r11
+; AVX2-NEXT: movq %r13, %r10
+; AVX2-NEXT: sbbq %r12, %r10
+; AVX2-NEXT: setb %r10b
+; AVX2-NEXT: cmpq %r11, %rbx
+; AVX2-NEXT: sbbq %r13, %r12
+; AVX2-NEXT: sbbb $0, %r10b
+; AVX2-NEXT: movb %r10b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: cmpq %r10, %r11
+; AVX2-NEXT: movq %r8, %rbx
+; AVX2-NEXT: sbbq %rbp, %rbx
+; AVX2-NEXT: setb %bl
+; AVX2-NEXT: cmpq %r11, %r10
+; AVX2-NEXT: sbbq %r8, %rbp
+; AVX2-NEXT: sbbb $0, %bl
+; AVX2-NEXT: movb %bl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: cmpq %r8, %r10
+; AVX2-NEXT: movq %rdx, %r11
+; AVX2-NEXT: sbbq %r14, %r11
+; AVX2-NEXT: setb %r11b
+; AVX2-NEXT: cmpq %r10, %r8
+; AVX2-NEXT: sbbq %rdx, %r14
+; AVX2-NEXT: sbbb $0, %r11b
+; AVX2-NEXT: movb %r11b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX2-NEXT: cmpq %rdx, %r8
+; AVX2-NEXT: movq %rax, %r10
+; AVX2-NEXT: sbbq %r15, %r10
+; AVX2-NEXT: setb %r10b
+; AVX2-NEXT: cmpq %r8, %rdx
+; AVX2-NEXT: sbbq %rax, %r15
+; AVX2-NEXT: sbbb $0, %r10b
+; AVX2-NEXT: movb %r10b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: cmpq %rax, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX2-NEXT: movq %r11, %r8
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: sbbq %r10, %r8
+; AVX2-NEXT: setb %r8b
+; AVX2-NEXT: cmpq %rdx, %rax
+; AVX2-NEXT: sbbq %r11, %r10
+; AVX2-NEXT: sbbb $0, %r8b
+; AVX2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: cmpq %rax, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX2-NEXT: movq %r11, %r8
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: sbbq %r10, %r8
+; AVX2-NEXT: setb %r8b
+; AVX2-NEXT: cmpq %rdx, %rax
+; AVX2-NEXT: sbbq %r11, %r10
+; AVX2-NEXT: sbbb $0, %r8b
+; AVX2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: cmpq %rax, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX2-NEXT: movq %r11, %r8
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: sbbq %r10, %r8
+; AVX2-NEXT: setb %r8b
+; AVX2-NEXT: cmpq %rdx, %rax
+; AVX2-NEXT: sbbq %r11, %r10
+; AVX2-NEXT: sbbb $0, %r8b
+; AVX2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: cmpq %rax, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX2-NEXT: movq %r11, %r8
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: sbbq %r10, %r8
+; AVX2-NEXT: setb %r12b
+; AVX2-NEXT: cmpq %rdx, %rax
+; AVX2-NEXT: sbbq %r11, %r10
+; AVX2-NEXT: sbbb $0, %r12b
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: cmpq %rax, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX2-NEXT: movq %r11, %r8
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: sbbq %r10, %r8
+; AVX2-NEXT: setb %r8b
+; AVX2-NEXT: cmpq %rdx, %rax
+; AVX2-NEXT: sbbq %r11, %r10
+; AVX2-NEXT: sbbb $0, %r8b
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: cmpq %rax, %r10
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX2-NEXT: movq %rbx, %rdx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX2-NEXT: sbbq %r11, %rdx
+; AVX2-NEXT: setb %dl
+; AVX2-NEXT: cmpq %r10, %rax
+; AVX2-NEXT: sbbq %rbx, %r11
+; AVX2-NEXT: sbbb $0, %dl
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: cmpq %rax, %r11
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; AVX2-NEXT: movq %r14, %r10
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX2-NEXT: sbbq %rbx, %r10
+; AVX2-NEXT: setb %r10b
+; AVX2-NEXT: cmpq %r11, %rax
+; AVX2-NEXT: sbbq %r14, %rbx
+; AVX2-NEXT: sbbb $0, %r10b
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: cmpq %rax, %rbx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX2-NEXT: movq %r15, %r11
+; AVX2-NEXT: movq (%rsp), %r14 # 8-byte Reload
+; AVX2-NEXT: sbbq %r14, %r11
+; AVX2-NEXT: setb %r11b
+; AVX2-NEXT: cmpq %rbx, %rax
+; AVX2-NEXT: sbbq %r15, %r14
+; AVX2-NEXT: sbbb $0, %r11b
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; AVX2-NEXT: cmpq %rax, %r14
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT: movq %r13, %rbx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX2-NEXT: sbbq %r15, %rbx
+; AVX2-NEXT: setb %bl
+; AVX2-NEXT: cmpq %r14, %rax
+; AVX2-NEXT: sbbq %r13, %r15
+; AVX2-NEXT: sbbb $0, %bl
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: cmpq %r9, %rax
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT: movq %r13, %r14
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX2-NEXT: sbbq %r15, %r14
+; AVX2-NEXT: setb %bpl
+; AVX2-NEXT: cmpq %rax, %r9
+; AVX2-NEXT: sbbq %r13, %r15
+; AVX2-NEXT: sbbb $0, %bpl
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: cmpq %rsi, %rax
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX2-NEXT: movq %r15, %r9
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; AVX2-NEXT: sbbq %r14, %r9
+; AVX2-NEXT: setb %r9b
+; AVX2-NEXT: cmpq %rax, %rsi
+; AVX2-NEXT: sbbq %r15, %r14
+; AVX2-NEXT: sbbb $0, %r9b
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: cmpq %rcx, %rax
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX2-NEXT: movq %r15, %rsi
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; AVX2-NEXT: sbbq %r14, %rsi
+; AVX2-NEXT: setb %sil
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: sbbq %r15, %r14
+; AVX2-NEXT: sbbb $0, %sil
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX2-NEXT: cmpq %rax, %rcx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
+; AVX2-NEXT: movq %r13, %r14
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX2-NEXT: sbbq %r15, %r14
+; AVX2-NEXT: setb %r14b
+; AVX2-NEXT: cmpq %rcx, %rax
+; AVX2-NEXT: sbbq %r13, %r15
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: sbbb $0, %r14b
+; AVX2-NEXT: movzbl %r14b, %ecx
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: movb %cl, 4(%rdi)
+; AVX2-NEXT: movzbl %sil, %ecx
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: movzbl %r9b, %esi
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: leaq (%rsi,%rcx,4), %rcx
+; AVX2-NEXT: movzbl %bpl, %esi
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: shll $4, %esi
+; AVX2-NEXT: orq %rcx, %rsi
+; AVX2-NEXT: movzbl %bl, %ecx
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: shll $6, %ecx
+; AVX2-NEXT: orq %rsi, %rcx
+; AVX2-NEXT: movzbl %r11b, %esi
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: shll $8, %esi
+; AVX2-NEXT: orq %rcx, %rsi
+; AVX2-NEXT: movzbl %r10b, %ecx
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: shll $10, %ecx
+; AVX2-NEXT: movzbl %dl, %edx
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: shll $12, %edx
+; AVX2-NEXT: orq %rcx, %rdx
+; AVX2-NEXT: movzbl %r8b, %edi
+; AVX2-NEXT: andl $3, %edi
+; AVX2-NEXT: shll $14, %edi
+; AVX2-NEXT: orq %rdx, %rdi
+; AVX2-NEXT: movzbl %r12b, %ecx
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: shll $16, %ecx
+; AVX2-NEXT: orq %rdi, %rcx
+; AVX2-NEXT: orq %rsi, %rcx
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: shll $18, %edx
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: shll $20, %esi
+; AVX2-NEXT: orq %rdx, %rsi
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: shll $22, %edx
+; AVX2-NEXT: orq %rsi, %rdx
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: shll $24, %esi
+; AVX2-NEXT: orq %rdx, %rsi
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: shlq $26, %rdx
+; AVX2-NEXT: orq %rsi, %rdx
+; AVX2-NEXT: orq %rcx, %rdx
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: shlq $28, %rcx
+; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: shlq $30, %rsi
+; AVX2-NEXT: orq %rcx, %rsi
+; AVX2-NEXT: orq %rdx, %rsi
+; AVX2-NEXT: movl %esi, (%rax)
+; AVX2-NEXT: addq $88, %rsp
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ucmp_uncommon_vectors:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: subq $88, %rsp
+; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: andl $127, %r8d
+; AVX512-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: andl $127, %edx
+; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbp
+; AVX512-NEXT: andl $127, %ebp
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX512-NEXT: andl $127, %r12d
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX512-NEXT: andl $127, %r13d
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; AVX512-NEXT: andl $127, %r15d
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: andl $127, %r10d
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: andl $127, %ebx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT: andl $127, %r8d
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r9
+; AVX512-NEXT: andl $127, %r9d
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; AVX512-NEXT: andl $127, %esi
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi
+; AVX512-NEXT: andl $127, %edi
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: andl $127, %eax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT: andl $127, %edx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: cmpq %r14, %r11
+; AVX512-NEXT: movq %rdx, %rcx
+; AVX512-NEXT: sbbq %rax, %rcx
+; AVX512-NEXT: setb %cl
+; AVX512-NEXT: cmpq %r11, %r14
+; AVX512-NEXT: sbbq %rdx, %rax
+; AVX512-NEXT: sbbb $0, %cl
+; AVX512-NEXT: movb %cl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: movq %rdi, %rdx
+; AVX512-NEXT: sbbq %rsi, %rdx
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: cmpq %rcx, %rax
+; AVX512-NEXT: sbbq %rdi, %rsi
+; AVX512-NEXT: sbbb $0, %dl
+; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: movq %r9, %rdx
+; AVX512-NEXT: sbbq %r8, %rdx
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: cmpq %rcx, %rax
+; AVX512-NEXT: sbbq %r9, %r8
+; AVX512-NEXT: sbbb $0, %dl
+; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: movq %rbx, %rdx
+; AVX512-NEXT: sbbq %r10, %rdx
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: cmpq %rcx, %rax
+; AVX512-NEXT: sbbq %rbx, %r10
+; AVX512-NEXT: sbbb $0, %dl
+; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: movq %r15, %rdx
+; AVX512-NEXT: sbbq %r13, %rdx
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: cmpq %rcx, %rax
+; AVX512-NEXT: sbbq %r15, %r13
+; AVX512-NEXT: sbbb $0, %dl
+; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: movq %r12, %rdx
+; AVX512-NEXT: sbbq %rbp, %rdx
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: cmpq %rcx, %rax
+; AVX512-NEXT: sbbq %r12, %rbp
+; AVX512-NEXT: sbbb $0, %dl
+; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX512-NEXT: movq %rdi, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX512-NEXT: sbbq %rsi, %rdx
+; AVX512-NEXT: setb %r13b
+; AVX512-NEXT: cmpq %rcx, %rax
+; AVX512-NEXT: sbbq %rdi, %rsi
+; AVX512-NEXT: sbbb $0, %r13b
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: cmpq %rax, %rcx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX512-NEXT: movq %rdi, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX512-NEXT: sbbq %rsi, %rdx
+; AVX512-NEXT: setb %bpl
+; AVX512-NEXT: cmpq %rcx, %rax
+; AVX512-NEXT: sbbq %rdi, %rsi
+; AVX512-NEXT: sbbb $0, %bpl
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT: cmpq %rcx, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX512-NEXT: movq %rdi, %rax
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX512-NEXT: sbbq %rsi, %rax
+; AVX512-NEXT: setb %r9b
+; AVX512-NEXT: cmpq %rdx, %rcx
+; AVX512-NEXT: sbbq %rdi, %rsi
+; AVX512-NEXT: sbbb $0, %r9b
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; AVX512-NEXT: cmpq %rdx, %rsi
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; AVX512-NEXT: movq %rdi, %rcx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: sbbq %rax, %rcx
+; AVX512-NEXT: setb %cl
+; AVX512-NEXT: cmpq %rsi, %rdx
+; AVX512-NEXT: sbbq %rdi, %rax
+; AVX512-NEXT: sbbb $0, %cl
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi
+; AVX512-NEXT: cmpq %rsi, %rdi
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT: movq %r8, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: sbbq %rax, %rdx
+; AVX512-NEXT: setb %dl
+; AVX512-NEXT: cmpq %rdi, %rsi
+; AVX512-NEXT: sbbq %r8, %rax
+; AVX512-NEXT: sbbb $0, %dl
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT: cmpq %rdi, %r8
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX512-NEXT: movq %r10, %rsi
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: sbbq %rax, %rsi
+; AVX512-NEXT: setb %sil
+; AVX512-NEXT: cmpq %r8, %rdi
+; AVX512-NEXT: sbbq %r10, %rax
+; AVX512-NEXT: sbbb $0, %sil
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: cmpq %r8, %r10
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
+; AVX512-NEXT: movq %r11, %rdi
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: sbbq %rax, %rdi
+; AVX512-NEXT: setb %dil
+; AVX512-NEXT: cmpq %r10, %r8
+; AVX512-NEXT: sbbq %r11, %rax
+; AVX512-NEXT: sbbb $0, %dil
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: cmpq %rax, %r10
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX512-NEXT: movq %rbx, %r8
+; AVX512-NEXT: movq (%rsp), %r11 # 8-byte Reload
+; AVX512-NEXT: sbbq %r11, %r8
+; AVX512-NEXT: setb %r8b
+; AVX512-NEXT: cmpq %r10, %rax
+; AVX512-NEXT: sbbq %rbx, %r11
+; AVX512-NEXT: sbbb $0, %r8b
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; AVX512-NEXT: cmpq %rbx, %r11
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; AVX512-NEXT: movq %r14, %r10
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: sbbq %rax, %r10
+; AVX512-NEXT: setb %r10b
+; AVX512-NEXT: cmpq %r11, %rbx
+; AVX512-NEXT: sbbq %r14, %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: sbbb $0, %r10b
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX512-NEXT: cmpq %r15, %r11
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: movq %rax, %rbx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; AVX512-NEXT: sbbq %r14, %rbx
+; AVX512-NEXT: setb %bl
+; AVX512-NEXT: cmpq %r11, %r15
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: sbbq %rax, %r14
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; AVX512-NEXT: sbbb $0, %bl
+; AVX512-NEXT: cmpq %r11, %r14
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX512-NEXT: movq %rax, %r15
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; AVX512-NEXT: sbbq %r12, %r15
+; AVX512-NEXT: setb %r15b
+; AVX512-NEXT: cmpq %r14, %r11
+; AVX512-NEXT: sbbq %rax, %r12
+; AVX512-NEXT: sbbb $0, %r15b
+; AVX512-NEXT: movzbl %r15b, %r11d
+; AVX512-NEXT: andl $3, %r11d
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; AVX512-NEXT: movb %r11b, 4(%r14)
+; AVX512-NEXT: movzbl %bl, %r11d
+; AVX512-NEXT: andl $3, %r11d
+; AVX512-NEXT: movzbl %r10b, %r10d
+; AVX512-NEXT: andl $3, %r10d
+; AVX512-NEXT: leaq (%r10,%r11,4), %r10
+; AVX512-NEXT: movzbl %r8b, %r8d
+; AVX512-NEXT: andl $3, %r8d
+; AVX512-NEXT: shll $4, %r8d
+; AVX512-NEXT: orq %r10, %r8
+; AVX512-NEXT: movzbl %dil, %edi
+; AVX512-NEXT: andl $3, %edi
+; AVX512-NEXT: shll $6, %edi
+; AVX512-NEXT: orq %r8, %rdi
+; AVX512-NEXT: movzbl %sil, %esi
+; AVX512-NEXT: andl $3, %esi
+; AVX512-NEXT: shll $8, %esi
+; AVX512-NEXT: orq %rdi, %rsi
+; AVX512-NEXT: movzbl %dl, %edx
+; AVX512-NEXT: andl $3, %edx
+; AVX512-NEXT: shll $10, %edx
+; AVX512-NEXT: movzbl %cl, %ecx
+; AVX512-NEXT: andl $3, %ecx
+; AVX512-NEXT: shll $12, %ecx
+; AVX512-NEXT: orq %rdx, %rcx
+; AVX512-NEXT: movzbl %r9b, %edx
+; AVX512-NEXT: andl $3, %edx
+; AVX512-NEXT: shll $14, %edx
+; AVX512-NEXT: orq %rcx, %rdx
+; AVX512-NEXT: movzbl %bpl, %eax
+; AVX512-NEXT: andl $3, %eax
+; AVX512-NEXT: shll $16, %eax
+; AVX512-NEXT: orq %rdx, %rax
+; AVX512-NEXT: orq %rsi, %rax
+; AVX512-NEXT: movzbl %r13b, %ecx
+; AVX512-NEXT: andl $3, %ecx
+; AVX512-NEXT: shll $18, %ecx
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload
+; AVX512-NEXT: andl $3, %edx
+; AVX512-NEXT: shll $20, %edx
+; AVX512-NEXT: orq %rcx, %rdx
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload
+; AVX512-NEXT: andl $3, %ecx
+; AVX512-NEXT: shll $22, %ecx
+; AVX512-NEXT: orq %rdx, %rcx
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload
+; AVX512-NEXT: andl $3, %edx
+; AVX512-NEXT: shll $24, %edx
+; AVX512-NEXT: orq %rcx, %rdx
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload
+; AVX512-NEXT: andl $3, %ecx
+; AVX512-NEXT: shlq $26, %rcx
+; AVX512-NEXT: orq %rdx, %rcx
+; AVX512-NEXT: orq %rax, %rcx
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
+; AVX512-NEXT: andl $3, %eax
+; AVX512-NEXT: shlq $28, %rax
+; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload
+; AVX512-NEXT: andl $3, %edx
+; AVX512-NEXT: shlq $30, %rdx
+; AVX512-NEXT: orq %rax, %rdx
+; AVX512-NEXT: orq %rcx, %rdx
+; AVX512-NEXT: movq %r14, %rax
+; AVX512-NEXT: movl %edx, (%r14)
+; AVX512-NEXT: addq $88, %rsp
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
;
; X86-LABEL: ucmp_uncommon_vectors:
; X86: # %bb.0:
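
For reference, the ucmp_uncommon_vectors sequences above are the fully scalarized expansion of a single llvm.ucmp call: each element is compared with a cmpq/sbbq wide unsigned compare in both directions, setb captures the "less than" borrow, sbbb $0 folds in the reverse compare to leave -1, 0 or +1 in a byte, and the andl $3 / shll / orq chains pack the seventeen 2-bit results into a 34-bit value (one 32-bit store plus one byte at offset 4). A minimal IR sketch of the shape being lowered follows; the exact signature sits in the elided part of the diff, so the <17 x i2> result and <17 x i71> operand types here are inferred from the seventeen 2-bit fields and the andl $127 masks on the operand high words, and should be treated as an assumption:

; Hypothetical reduction of the test above: one unsigned three-way compare
; per element (-1 where x < y, 0 where equal, +1 where x > y) as an i2.
declare <17 x i2> @llvm.ucmp.v17i2.v17i71(<17 x i71>, <17 x i71>)

define <17 x i2> @ucmp_sketch(<17 x i71> %x, <17 x i71> %y) nounwind {
  %r = call <17 x i2> @llvm.ucmp.v17i2.v17i71(<17 x i71> %x, <17 x i71> %y)
  ret <17 x i2> %r
}

Since no x86 subtarget supports i71 vector elements natively, each of the new RUN lines legalizes this single call into the long scalar cmp/sbb sequences checked above.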