[llvm] e209190 - [SDAG] enable binop identity constant folds for multiplies
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 25 04:07:21 PDT 2022
Author: Simon Pilgrim
Date: 2022-03-25T11:07:04Z
New Revision: e209190c2d96a8bde115c7155f099d344c456b5b
URL: https://github.com/llvm/llvm-project/commit/e209190c2d96a8bde115c7155f099d344c456b5b
DIFF: https://github.com/llvm/llvm-project/commit/e209190c2d96a8bde115c7155f099d344c456b5b.diff
LOG: [SDAG] enable binop identity constant folds for multiplies
Add mul to the list of ops that we canonicalize with a select to expose an identity merge.
Differential Revision: https://reviews.llvm.org/D122071
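
For readers skimming the diff below: foldSelectWithIdentityConstant() sinks a binop into a one-use vselect when one select arm is the identity constant for that binop, i.e. binop X, (select Cond, IdC, Y) --> select Cond, X, (binop X, Y). A minimal IR sketch of the new mul case (illustrative only, not one of the committed tests; the function and value names are made up):

define <4 x i32> @mul_identity_fold(<4 x i1> %cond, <4 x i32> %x, <4 x i32> %y) {
  %sel = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> %y
  ; mul X, (select Cond, 1, Y) --> select Cond, X, (mul X, Y)
  %r = mul <4 x i32> %x, %sel
  ret <4 x i32> %r
}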
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
llvm/test/CodeGen/X86/vector-bo-select.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 708f9c502cd61..c3b12f8b923fa 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2167,6 +2167,8 @@ static SDValue foldSelectWithIdentityConstant(SDNode *N, SelectionDAG &DAG,
case ISD::SRA: // X s>> 0 --> X
case ISD::SRL: // X u>> 0 --> X
return C->isZero();
+ case ISD::MUL: // X * 1 --> X
+ return C->isOne();
}
}
return false;
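
The AVX512 test churn below all stems from one shape: the midpoint tests multiply the halved difference by select cond, <-1,...>, <1,...>. With ISD::MUL now handled, the splat-of-1 arm is recognized as the multiply's identity, so the constant broadcast, the masked move of the select result, and the vpmul* expansion collapse into a conditional negate. A reduced sketch of that pattern (illustrative only, not a committed test):

define <4 x i32> @mul_by_sign(<4 x i1> %cond, <4 x i32> %x) {
  %sign = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  ; mul X, (select Cond, -1, 1) --> select Cond, (mul X, -1), X
  ;                              --> select Cond, (sub 0, X), X
  %r = mul <4 x i32> %x, %sign
  ret <4 x i32> %r
}

On AVX512VL this is what replaces the vpbroadcast of [1,1,...] + masked vmovdqa + vpmul* sequences with a zeroed register and a merge-masked subtract (or an xor/sub conditional negate in the non-masked fallbacks).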
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
index e6eee0d9a02f2..9557463961249 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
@@ -144,14 +144,12 @@ define <4 x i32> @vec128_i32_signed_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwi
; AVX512VL-LABEL: vec128_i32_signed_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubd %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
@@ -313,14 +311,12 @@ define <4 x i32> @vec128_i32_unsigned_reg_reg(<4 x i32> %a1, <4 x i32> %a2) noun
; AVX512VL-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpnleud %xmm1, %xmm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubd %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
@@ -483,14 +479,12 @@ define <4 x i32> @vec128_i32_signed_mem_reg(<4 x i32>* %a1_addr, <4 x i32> %a2)
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpmulld %xmm3, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubd %xmm0, %xmm2, %xmm0 {%k1}
; AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
@@ -652,14 +646,12 @@ define <4 x i32> @vec128_i32_signed_reg_mem(<4 x i32> %a1, <4 x i32>* %a2_addr)
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubd %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
@@ -829,14 +821,12 @@ define <4 x i32> @vec128_i32_signed_mem_mem(<4 x i32>* %a1_addr, <4 x i32>* %a2_
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubd %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
@@ -1049,22 +1039,13 @@ define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwi
; AVX512VL-LABEL: vec128_i64_signed_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
-; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
-; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm2
-; AVX512VL-NEXT: vpsrlq $33, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
-; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm4
-; AVX512VL-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
-; AVX512VL-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; AVX512VL-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
-; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
@@ -1281,22 +1262,13 @@ define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) noun
; AVX512VL-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
-; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
-; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm2
-; AVX512VL-NEXT: vpsrlq $33, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
-; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm4
-; AVX512VL-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
-; AVX512VL-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; AVX512VL-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
-; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
@@ -1515,22 +1487,13 @@ define <2 x i64> @vec128_i64_signed_mem_reg(<2 x i64>* %a1_addr, <2 x i64> %a2)
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm0, %xmm1, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
-; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm0, %xmm1, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsrlq $1, %xmm0, %xmm2
-; AVX512VL-NEXT: vpsrlq $33, %xmm0, %xmm0
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm4
-; AVX512VL-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
-; AVX512VL-NEXT: vpaddq %xmm0, %xmm4, %xmm0
-; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %xmm0, %xmm2, %xmm0 {%k1}
; AVX512VL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
@@ -1747,22 +1710,13 @@ define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, <2 x i64>* %a2_addr)
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
-; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
-; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm2
-; AVX512VL-NEXT: vpsrlq $33, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
-; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm4
-; AVX512VL-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
-; AVX512VL-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; AVX512VL-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
-; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
@@ -1985,22 +1939,13 @@ define <2 x i64> @vec128_i64_signed_mem_mem(<2 x i64>* %a1_addr, <2 x i64>* %a2_
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
-; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
-; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm2
-; AVX512VL-NEXT: vpsrlq $33, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
-; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm4
-; AVX512VL-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
-; AVX512VL-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; AVX512VL-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX512VL-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
-; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
@@ -2109,13 +2054,13 @@ define <8 x i16> @vec128_i16_signed_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwi
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2139,14 +2084,12 @@ define <8 x i16> @vec128_i16_signed_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwi
; AVX512VLBW-LABEL: vec128_i16_signed_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
@@ -2252,13 +2195,13 @@ define <8 x i16> @vec128_i16_unsigned_reg_reg(<8 x i16> %a1, <8 x i16> %a2) noun
; AVX512VL-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX512VL-FALLBACK-NEXT: vpternlogq $190, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm4
; AVX512VL-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpmullw %xmm4, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $15, %xmm2, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2282,14 +2225,12 @@ define <8 x i16> @vec128_i16_unsigned_reg_reg(<8 x i16> %a1, <8 x i16> %a2) noun
; AVX512VLBW-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpnleuw %xmm1, %xmm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%t3 = icmp ugt <8 x i16> %a1, %a2
@@ -2375,13 +2316,13 @@ define <8 x i16> @vec128_i16_signed_mem_reg(<8 x i16>* %a1_addr, <8 x i16> %a2)
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
-; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm0, %xmm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm3
+; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm0, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %xmm0, %xmm2, %xmm0
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2406,14 +2347,12 @@ define <8 x i16> @vec128_i16_signed_mem_reg(<8 x i16>* %a1_addr, <8 x i16> %a2)
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm0, %xmm1, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm0, %xmm1, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm0, %xmm0
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %xmm0, %xmm2, %xmm0 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
%a1 = load <8 x i16>, <8 x i16>* %a1_addr
@@ -2498,13 +2437,13 @@ define <8 x i16> @vec128_i16_signed_reg_mem(<8 x i16> %a1, <8 x i16>* %a2_addr)
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2529,14 +2468,12 @@ define <8 x i16> @vec128_i16_signed_reg_mem(<8 x i16> %a1, <8 x i16>* %a2_addr)
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%a2 = load <8 x i16>, <8 x i16>* %a2_addr
@@ -2627,13 +2564,13 @@ define <8 x i16> @vec128_i16_signed_mem_mem(<8 x i16>* %a1_addr, <8 x i16>* %a2_
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2659,14 +2596,12 @@ define <8 x i16> @vec128_i16_signed_mem_mem(<8 x i16>* %a1_addr, <8 x i16>* %a2_
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VLBW-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%a1 = load <8 x i16>, <8 x i16>* %a1_addr
@@ -2865,20 +2800,15 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin
;
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VL-FALLBACK-NEXT: vzeroupper
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
@@ -2905,20 +2835,14 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin
; AVX512VLBW-LABEL: vec128_i8_signed_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
%t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -3110,20 +3034,15 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw
; AVX512VL-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX512VL-FALLBACK-NEXT: vpternlogq $190, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm4
; AVX512VL-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $15, %xmm2, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VL-FALLBACK-NEXT: vzeroupper
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
@@ -3150,20 +3069,14 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw
; AVX512VLBW-LABEL: vec128_i8_unsigned_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpnleub %xmm1, %xmm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminub %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%t3 = icmp ugt <16 x i8> %a1, %a2
%t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -3366,20 +3279,15 @@ define <16 x i8> @vec128_i8_signed_mem_reg(<16 x i8>* %a1_addr, <16 x i8> %a2) n
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
-; AVX512VL-FALLBACK-NEXT: vpsubb %xmm3, %xmm0, %xmm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm3
+; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm0, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %xmm0, %xmm2, %xmm0
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
-; AVX512VL-FALLBACK-NEXT: vzeroupper
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
@@ -3407,20 +3315,14 @@ define <16 x i8> @vec128_i8_signed_mem_reg(<16 x i8>* %a1_addr, <16 x i8> %a2) n
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtb %xmm0, %xmm1, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %xmm0, %xmm1, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm0, %ymm0
-; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %xmm0, %xmm2, %xmm0 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
-; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%a1 = load <16 x i8>, <16 x i8>* %a1_addr
%t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
@@ -3620,20 +3522,15 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, <16 x i8>* %a2_addr) n
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VL-FALLBACK-NEXT: vzeroupper
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
@@ -3661,20 +3558,14 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, <16 x i8>* %a2_addr) n
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%a2 = load <16 x i8>, <16 x i8>* %a2_addr
%t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
@@ -3883,20 +3774,15 @@ define <16 x i8> @vec128_i8_signed_mem_mem(<16 x i8>* %a1_addr, <16 x i8>* %a2_a
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm1, %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VL-FALLBACK-NEXT: vzeroupper
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
@@ -3925,20 +3811,14 @@ define <16 x i8> @vec128_i8_signed_mem_mem(<16 x i8>* %a1_addr, <16 x i8>* %a2_a
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VLBW-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%a1 = load <16 x i8>, <16 x i8>* %a1_addr
%a2 = load <16 x i8>, <16 x i8>* %a2_addr
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
index b5da974300039..de9605c3afce0 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
@@ -640,22 +640,13 @@ define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwi
; AVX512VL-LABEL: vec256_i64_signed_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa64 %ymm2, %ymm3 {%k1}
; AVX512VL-NEXT: vpminsq %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vpmaxsq %ymm1, %ymm0, %ymm1
; AVX512VL-NEXT: vpsubq %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlq $1, %ymm1, %ymm2
-; AVX512VL-NEXT: vpsrlq $33, %ymm1, %ymm1
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlq $32, %ymm3, %ymm4
-; AVX512VL-NEXT: vpmuludq %ymm4, %ymm2, %ymm4
-; AVX512VL-NEXT: vpaddq %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT: vpsllq $32, %ymm1, %ymm1
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VL-NEXT: vpaddq %ymm0, %ymm1, %ymm0
-; AVX512VL-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec256_i64_signed_reg_reg:
@@ -867,22 +858,13 @@ define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) noun
; AVX512VL-LABEL: vec256_i64_unsigned_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpnleuq %ymm1, %ymm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa64 %ymm2, %ymm3 {%k1}
; AVX512VL-NEXT: vpminuq %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vpmaxuq %ymm1, %ymm0, %ymm1
; AVX512VL-NEXT: vpsubq %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlq $1, %ymm1, %ymm2
-; AVX512VL-NEXT: vpsrlq $33, %ymm1, %ymm1
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlq $32, %ymm3, %ymm4
-; AVX512VL-NEXT: vpmuludq %ymm4, %ymm2, %ymm4
-; AVX512VL-NEXT: vpaddq %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT: vpsllq $32, %ymm1, %ymm1
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VL-NEXT: vpaddq %ymm0, %ymm1, %ymm0
-; AVX512VL-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec256_i64_unsigned_reg_reg:
@@ -1093,22 +1075,13 @@ define <4 x i64> @vec256_i64_signed_mem_reg(<4 x i64>* %a1_addr, <4 x i64> %a2)
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm1
; AVX512VL-NEXT: vpcmpgtq %ymm0, %ymm1, %k1
-; AVX512VL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa64 %ymm2, %ymm3 {%k1}
; AVX512VL-NEXT: vpminsq %ymm0, %ymm1, %ymm2
; AVX512VL-NEXT: vpmaxsq %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: vpsubq %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlq $1, %ymm0, %ymm2
-; AVX512VL-NEXT: vpsrlq $33, %ymm0, %ymm0
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlq $32, %ymm3, %ymm4
-; AVX512VL-NEXT: vpmuludq %ymm4, %ymm2, %ymm4
-; AVX512VL-NEXT: vpaddq %ymm0, %ymm4, %ymm0
-; AVX512VL-NEXT: vpsllq $32, %ymm0, %ymm0
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %ymm0, %ymm2, %ymm0 {%k1}
; AVX512VL-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX512VL-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec256_i64_signed_mem_reg:
@@ -1318,22 +1291,13 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, <4 x i64>* %a2_addr)
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm1
; AVX512VL-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa64 %ymm2, %ymm3 {%k1}
; AVX512VL-NEXT: vpminsq %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vpmaxsq %ymm1, %ymm0, %ymm1
; AVX512VL-NEXT: vpsubq %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlq $1, %ymm1, %ymm2
-; AVX512VL-NEXT: vpsrlq $33, %ymm1, %ymm1
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlq $32, %ymm3, %ymm4
-; AVX512VL-NEXT: vpmuludq %ymm4, %ymm2, %ymm4
-; AVX512VL-NEXT: vpaddq %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT: vpsllq $32, %ymm1, %ymm1
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VL-NEXT: vpaddq %ymm0, %ymm1, %ymm0
-; AVX512VL-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec256_i64_signed_reg_mem:
@@ -1548,22 +1512,13 @@ define <4 x i64> @vec256_i64_signed_mem_mem(<4 x i64>* %a1_addr, <4 x i64>* %a2_
; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqa (%rsi), %ymm1
; AVX512VL-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
-; AVX512VL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa64 %ymm2, %ymm3 {%k1}
; AVX512VL-NEXT: vpminsq %ymm1, %ymm0, %ymm2
; AVX512VL-NEXT: vpmaxsq %ymm1, %ymm0, %ymm1
; AVX512VL-NEXT: vpsubq %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlq $1, %ymm1, %ymm2
-; AVX512VL-NEXT: vpsrlq $33, %ymm1, %ymm1
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlq $32, %ymm3, %ymm4
-; AVX512VL-NEXT: vpmuludq %ymm4, %ymm2, %ymm4
-; AVX512VL-NEXT: vpaddq %ymm1, %ymm4, %ymm1
-; AVX512VL-NEXT: vpsllq $32, %ymm1, %ymm1
-; AVX512VL-NEXT: vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsubq %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VL-NEXT: vpaddq %ymm0, %ymm1, %ymm0
-; AVX512VL-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec256_i64_signed_mem_mem:
@@ -1702,13 +1657,13 @@ define <16 x i16> @vec256_i16_signed_reg_reg(<16 x i16> %a1, <16 x i16> %a2) nou
;
; AVX512VL-FALLBACK-LABEL: vec256_i16_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm2, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -1731,14 +1686,12 @@ define <16 x i16> @vec256_i16_signed_reg_reg(<16 x i16> %a1, <16 x i16> %a2) nou
; AVX512VLBW-LABEL: vec256_i16_signed_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpgtw %ymm1, %ymm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %ymm1, %ymm0, %ymm2
; AVX512VLBW-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%t3 = icmp sgt <16 x i16> %a1, %a2 ; signed
@@ -1854,13 +1807,13 @@ define <16 x i16> @vec256_i16_unsigned_reg_reg(<16 x i16> %a1, <16 x i16> %a2) n
; AVX512VL-FALLBACK-LABEL: vec256_i16_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminuw %ymm1, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpternlogq $190, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $15, %ymm2, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -1883,14 +1836,12 @@ define <16 x i16> @vec256_i16_unsigned_reg_reg(<16 x i16> %a1, <16 x i16> %a2) n
; AVX512VLBW-LABEL: vec256_i16_unsigned_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpnleuw %ymm1, %ymm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminuw %ymm1, %ymm0, %ymm2
; AVX512VLBW-NEXT: vpmaxuw %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%t3 = icmp ugt <16 x i16> %a1, %a2
@@ -2007,13 +1958,13 @@ define <16 x i16> @vec256_i16_signed_mem_reg(<16 x i16>* %a1_addr, <16 x i16> %a
; AVX512VL-FALLBACK-LABEL: vec256_i16_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm1, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm1, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm3, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm0, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2037,14 +1988,12 @@ define <16 x i16> @vec256_i16_signed_mem_reg(<16 x i16>* %a1_addr, <16 x i16> %a
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %ymm1
; AVX512VLBW-NEXT: vpcmpgtw %ymm0, %ymm1, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %ymm0, %ymm1, %ymm2
; AVX512VLBW-NEXT: vpmaxsw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: vpsubw %ymm2, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512VLBW-NEXT: vpmullw %ymm3, %ymm0, %ymm0
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %ymm0, %ymm2, %ymm0 {%k1}
; AVX512VLBW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512VLBW-NEXT: retq
%a1 = load <16 x i16>, <16 x i16>* %a1_addr
@@ -2160,13 +2109,13 @@ define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, <16 x i16>* %a2_add
; AVX512VL-FALLBACK-LABEL: vec256_i16_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm2, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2190,14 +2139,12 @@ define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, <16 x i16>* %a2_add
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %ymm1
; AVX512VLBW-NEXT: vpcmpgtw %ymm1, %ymm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %ymm1, %ymm0, %ymm2
; AVX512VLBW-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%a2 = load <16 x i16>, <16 x i16>* %a2_addr
@@ -2319,13 +2266,13 @@ define <16 x i16> @vec256_i16_signed_mem_mem(<16 x i16>* %a1_addr, <16 x i16>* %
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm2, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2350,14 +2297,12 @@ define <16 x i16> @vec256_i16_signed_mem_mem(<16 x i16>* %a1_addr, <16 x i16>* %
; AVX512VLBW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VLBW-NEXT: vmovdqa (%rsi), %ymm1
; AVX512VLBW-NEXT: vpcmpgtw %ymm1, %ymm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu16 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %ymm1, %ymm0, %ymm2
; AVX512VLBW-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubw %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%a1 = load <16 x i16>, <16 x i16>* %a1_addr
@@ -2546,23 +2491,14 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin
;
; AVX512VL-FALLBACK-LABEL: vec256_i8_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm2, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2589,18 +2525,13 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin
; AVX512VLBW-LABEL: vec256_i8_signed_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpgtb %ymm1, %ymm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %ymm1, %ymm0, %ymm2
; AVX512VLBW-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
-; AVX512VLBW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
-; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%t3 = icmp sgt <32 x i8> %a1, %a2 ; signed
@@ -2788,23 +2719,14 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
; AVX512VL-FALLBACK-LABEL: vec256_i8_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminub %ymm1, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpternlogq $190, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpmaxub %ymm1, %ymm0, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm3, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $15, %ymm2, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -2831,18 +2753,13 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
; AVX512VLBW-LABEL: vec256_i8_unsigned_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpnleub %ymm1, %ymm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminub %ymm1, %ymm0, %ymm2
; AVX512VLBW-NEXT: vpmaxub %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
-; AVX512VLBW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
-; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%t3 = icmp ugt <32 x i8> %a1, %a2
@@ -3031,23 +2948,14 @@ define <32 x i8> @vec256_i8_signed_mem_reg(<32 x i8>* %a1_addr, <32 x i8> %a2) n
; AVX512VL-FALLBACK-LABEL: vec256_i8_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm0, %ymm1, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm0, %ymm1, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm3, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm0, %ymm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm0, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm0, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -3075,18 +2983,13 @@ define <32 x i8> @vec256_i8_signed_mem_reg(<32 x i8>* %a1_addr, <32 x i8> %a2) n
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %ymm1
; AVX512VLBW-NEXT: vpcmpgtb %ymm0, %ymm1, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %ymm0, %ymm1, %ymm2
; AVX512VLBW-NEXT: vpmaxsb %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: vpsubb %ymm2, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
-; AVX512VLBW-NEXT: vpmullw %zmm2, %zmm0, %zmm0
-; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %ymm0, %ymm2, %ymm0 {%k1}
; AVX512VLBW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512VLBW-NEXT: retq
%a1 = load <32 x i8>, <32 x i8>* %a1_addr
@@ -3274,23 +3177,14 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, <32 x i8>* %a2_addr) n
; AVX512VL-FALLBACK-LABEL: vec256_i8_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm2, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -3318,18 +3212,13 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, <32 x i8>* %a2_addr) n
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %ymm1
; AVX512VLBW-NEXT: vpcmpgtb %ymm1, %ymm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %ymm1, %ymm0, %ymm2
; AVX512VLBW-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
-; AVX512VLBW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
-; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%a2 = load <32 x i8>, <32 x i8>* %a2_addr
@@ -3523,23 +3412,14 @@ define <32 x i8> @vec256_i8_signed_mem_mem(<32 x i8>* %a1_addr, <32 x i8>* %a2_a
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %ymm1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm2, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VL-FALLBACK-NEXT: retq
;
@@ -3568,18 +3448,13 @@ define <32 x i8> @vec256_i8_signed_mem_mem(<32 x i8>* %a1_addr, <32 x i8>* %a2_a
; AVX512VLBW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512VLBW-NEXT: vmovdqa (%rsi), %ymm1
; AVX512VLBW-NEXT: vpcmpgtb %ymm1, %ymm0, %k1
-; AVX512VLBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VLBW-NEXT: vmovdqu8 %ymm2, %ymm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %ymm1, %ymm0, %ymm2
; AVX512VLBW-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
-; AVX512VLBW-NEXT: vpmullw %zmm2, %zmm1, %zmm1
-; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %ymm1, %ymm2, %ymm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
%a1 = load <32 x i8>, <32 x i8>* %a1_addr
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
index ac1259d7d9459..47caed6531bb9 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-512.ll
@@ -140,22 +140,13 @@ define <8 x i64> @vec512_i64_signed_reg_reg(<8 x i64> %a1, <8 x i64> %a2) nounwi
; ALL-LABEL: vec512_i64_signed_reg_reg:
; ALL: # %bb.0:
; ALL-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
-; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
-; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubq %zmm2, %zmm1, %zmm1
-; ALL-NEXT: vpsrlq $1, %zmm1, %zmm2
-; ALL-NEXT: vpsrlq $33, %zmm1, %zmm1
-; ALL-NEXT: vpmuludq %zmm3, %zmm1, %zmm1
-; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
-; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
-; ALL-NEXT: vpaddq %zmm1, %zmm4, %zmm1
-; ALL-NEXT: vpsllq $32, %zmm1, %zmm1
-; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
+; ALL-NEXT: vpsrlq $1, %zmm1, %zmm1
+; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; ALL-NEXT: vpsubq %zmm1, %zmm2, %zmm1 {%k1}
; ALL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
-; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
%t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
@@ -172,22 +163,13 @@ define <8 x i64> @vec512_i64_unsigned_reg_reg(<8 x i64> %a1, <8 x i64> %a2) noun
; ALL-LABEL: vec512_i64_unsigned_reg_reg:
; ALL: # %bb.0:
; ALL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
-; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
-; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminuq %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubq %zmm2, %zmm1, %zmm1
-; ALL-NEXT: vpsrlq $1, %zmm1, %zmm2
-; ALL-NEXT: vpsrlq $33, %zmm1, %zmm1
-; ALL-NEXT: vpmuludq %zmm3, %zmm1, %zmm1
-; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
-; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
-; ALL-NEXT: vpaddq %zmm1, %zmm4, %zmm1
-; ALL-NEXT: vpsllq $32, %zmm1, %zmm1
-; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
+; ALL-NEXT: vpsrlq $1, %zmm1, %zmm1
+; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; ALL-NEXT: vpsubq %zmm1, %zmm2, %zmm1 {%k1}
; ALL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
-; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%t3 = icmp ugt <8 x i64> %a1, %a2
%t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
@@ -207,22 +189,13 @@ define <8 x i64> @vec512_i64_signed_mem_reg(<8 x i64>* %a1_addr, <8 x i64> %a2)
; ALL: # %bb.0:
; ALL-NEXT: vmovdqa64 (%rdi), %zmm1
; ALL-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
-; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
-; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminsq %zmm0, %zmm1, %zmm2
; ALL-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
; ALL-NEXT: vpsubq %zmm2, %zmm0, %zmm0
-; ALL-NEXT: vpsrlq $1, %zmm0, %zmm2
-; ALL-NEXT: vpsrlq $33, %zmm0, %zmm0
-; ALL-NEXT: vpmuludq %zmm3, %zmm0, %zmm0
-; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
-; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
-; ALL-NEXT: vpaddq %zmm0, %zmm4, %zmm0
-; ALL-NEXT: vpsllq $32, %zmm0, %zmm0
-; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
+; ALL-NEXT: vpsrlq $1, %zmm0, %zmm0
+; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; ALL-NEXT: vpsubq %zmm0, %zmm2, %zmm0 {%k1}
; ALL-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%a1 = load <8 x i64>, <8 x i64>* %a1_addr
%t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
@@ -241,22 +214,13 @@ define <8 x i64> @vec512_i64_signed_reg_mem(<8 x i64> %a1, <8 x i64>* %a2_addr)
; ALL: # %bb.0:
; ALL-NEXT: vmovdqa64 (%rdi), %zmm1
; ALL-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
-; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
-; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubq %zmm2, %zmm1, %zmm1
-; ALL-NEXT: vpsrlq $1, %zmm1, %zmm2
-; ALL-NEXT: vpsrlq $33, %zmm1, %zmm1
-; ALL-NEXT: vpmuludq %zmm3, %zmm1, %zmm1
-; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
-; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
-; ALL-NEXT: vpaddq %zmm1, %zmm4, %zmm1
-; ALL-NEXT: vpsllq $32, %zmm1, %zmm1
-; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
+; ALL-NEXT: vpsrlq $1, %zmm1, %zmm1
+; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; ALL-NEXT: vpsubq %zmm1, %zmm2, %zmm1 {%k1}
; ALL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
-; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%a2 = load <8 x i64>, <8 x i64>* %a2_addr
%t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
@@ -276,22 +240,13 @@ define <8 x i64> @vec512_i64_signed_mem_mem(<8 x i64>* %a1_addr, <8 x i64>* %a2_
; ALL-NEXT: vmovdqa64 (%rdi), %zmm0
; ALL-NEXT: vmovdqa64 (%rsi), %zmm1
; ALL-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
-; ALL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; ALL-NEXT: vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
-; ALL-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; ALL-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; ALL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; ALL-NEXT: vpsubq %zmm2, %zmm1, %zmm1
-; ALL-NEXT: vpsrlq $1, %zmm1, %zmm2
-; ALL-NEXT: vpsrlq $33, %zmm1, %zmm1
-; ALL-NEXT: vpmuludq %zmm3, %zmm1, %zmm1
-; ALL-NEXT: vpsrlq $32, %zmm3, %zmm4
-; ALL-NEXT: vpmuludq %zmm4, %zmm2, %zmm4
-; ALL-NEXT: vpaddq %zmm1, %zmm4, %zmm1
-; ALL-NEXT: vpsllq $32, %zmm1, %zmm1
-; ALL-NEXT: vpmuludq %zmm3, %zmm2, %zmm2
+; ALL-NEXT: vpsrlq $1, %zmm1, %zmm1
+; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; ALL-NEXT: vpsubq %zmm1, %zmm2, %zmm1 {%k1}
; ALL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
-; ALL-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; ALL-NEXT: retq
%a1 = load <8 x i64>, <8 x i64>* %a1_addr
%a2 = load <8 x i64>, <8 x i64>* %a2_addr
@@ -319,22 +274,26 @@ define <32 x i16> @vec512_i16_signed_reg_reg(<32 x i16> %a1, <32 x i16> %a2) nou
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT: vpminsw %ymm2, %ymm3, %ymm6
-; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm7
+; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT: vpsubw %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsubw %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
-; AVX512F-NEXT: vpsubw %ymm7, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpmullw %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpor %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddw %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT: vpsubw %ymm2, %ymm5, %ymm2
+; AVX512F-NEXT: vpsubw %ymm1, %ymm5, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512F-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512F-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_reg:
@@ -343,35 +302,37 @@ define <32 x i16> @vec512_i16_signed_reg_reg(<32 x i16> %a1, <32 x i16> %a2) nou
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm2, %ymm3, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm7
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm7, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm5, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_reg_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
@@ -392,24 +353,26 @@ define <32 x i16> @vec512_i16_unsigned_reg_reg(<32 x i16> %a1, <32 x i16> %a2) n
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpminuw %ymm2, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpeqw %ymm4, %ymm3, %ymm5
-; AVX512F-NEXT: vpternlogq $15, %zmm5, %zmm5, %zmm5
; AVX512F-NEXT: vpminuw %ymm1, %ymm0, %ymm6
; AVX512F-NEXT: vpcmpeqw %ymm6, %ymm0, %ymm7
-; AVX512F-NEXT: vpternlogq $15, %zmm7, %zmm7, %zmm7
-; AVX512F-NEXT: vpmaxuw %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm5
; AVX512F-NEXT: vpmaxuw %ymm1, %ymm0, %ymm1
-; AVX512F-NEXT: vpsubw %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT: vpmaxuw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsubw %ymm4, %ymm2, %ymm2
-; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vpsubw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm4, %ymm7, %ymm6
-; AVX512F-NEXT: vpmullw %ymm6, %ymm1, %ymm1
-; AVX512F-NEXT: vpor %ymm4, %ymm5, %ymm4
-; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddw %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512F-NEXT: vpsubw %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpsubw %ymm1, %ymm4, %ymm4
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512F-NEXT: vpternlogq $216, %zmm5, %zmm1, %zmm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512F-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_unsigned_reg_reg:
@@ -420,34 +383,35 @@ define <32 x i16> @vec512_i16_unsigned_reg_reg(<32 x i16> %a1, <32 x i16> %a2) n
; AVX512VL-FALLBACK-NEXT: vpcmpeqw %ymm4, %ymm3, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminuw %ymm1, %ymm0, %ymm6
; AVX512VL-FALLBACK-NEXT: vpcmpeqw %ymm6, %ymm0, %ymm7
-; AVX512VL-FALLBACK-NEXT: vpcmpeqd %ymm8, %ymm8, %ymm8
-; AVX512VL-FALLBACK-NEXT: vpmaxuw %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm5
; AVX512VL-FALLBACK-NEXT: vpmaxuw %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpmaxuw %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm4, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpternlogq $222, %ymm8, %ymm4, %ymm7
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm7, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpternlogq $222, %ymm8, %ymm4, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm4, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $216, %zmm5, %zmm1, %zmm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_unsigned_reg_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpnleuw %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminuw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxuw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%t3 = icmp ugt <32 x i16> %a1, %a2
@@ -471,22 +435,26 @@ define <32 x i16> @vec512_i16_signed_mem_reg(<32 x i16>* %a1_addr, <32 x i16> %a
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512F-NEXT: vpminsw %ymm0, %ymm2, %ymm5
; AVX512F-NEXT: vpminsw %ymm1, %ymm3, %ymm6
-; AVX512F-NEXT: vpminsw %ymm0, %ymm2, %ymm7
+; AVX512F-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vpsubw %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpsubw %ymm6, %ymm1, %ymm1
-; AVX512F-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT: vpsubw %ymm7, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpmullw %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT: vpsubw %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT: vpsubw %ymm0, %ymm5, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512F-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512F-NEXT: vpaddw %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_reg:
@@ -496,36 +464,38 @@ define <32 x i16> @vec512_i16_signed_mem_reg(<32 x i16>* %a1_addr, <32 x i16> %a
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm3, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm2, %ymm7
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm7, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm5, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm0, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_mem_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT: vpcmpgtw %zmm0, %zmm1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsw %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpmaxsw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpsubw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubw %zmm0, %zmm2, %zmm0 {%k1}
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%a1 = load <32 x i16>, <32 x i16>* %a1_addr
@@ -548,22 +518,26 @@ define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, <32 x i16>* %a2_add
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm5
; AVX512F-NEXT: vpminsw %ymm2, %ymm3, %ymm6
-; AVX512F-NEXT: vpminsw %ymm1, %ymm0, %ymm7
+; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT: vpsubw %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsubw %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
-; AVX512F-NEXT: vpsubw %ymm7, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpmullw %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpor %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw %ymm4, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddw %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddw %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT: vpsubw %ymm2, %ymm5, %ymm2
+; AVX512F-NEXT: vpsubw %ymm1, %ymm5, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512F-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512F-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_mem:
@@ -573,36 +547,38 @@ define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, <32 x i16>* %a2_add
; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm2, %ymm3, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm0, %ymm7
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm2, %ymm3, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm7, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm1, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm2, %ymm5, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_reg_mem:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%a2 = load <32 x i16>, <32 x i16>* %a2_addr
@@ -626,22 +602,26 @@ define <32 x i16> @vec512_i16_signed_mem_mem(<32 x i16>* %a1_addr, <32 x i16>* %
; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512F-NEXT: vpminsw %ymm0, %ymm2, %ymm5
; AVX512F-NEXT: vpminsw %ymm1, %ymm3, %ymm6
-; AVX512F-NEXT: vpminsw %ymm0, %ymm2, %ymm7
+; AVX512F-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vpsubw %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpsubw %ymm6, %ymm1, %ymm1
-; AVX512F-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
-; AVX512F-NEXT: vpsubw %ymm7, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpmullw %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpmullw %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT: vpsubw %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT: vpsubw %ymm0, %ymm5, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512F-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512F-NEXT: vpaddw %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_mem:
@@ -652,22 +632,26 @@ define <32 x i16> @vec512_i16_signed_mem_mem(<32 x i16>* %a1_addr, <32 x i16>* %
; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm1, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm2, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminsw %ymm1, %ymm3, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpminsw %ymm0, %ymm2, %ymm7
+; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm5, %ymm0, %ymm0
; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm1, %ymm3, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsubw %ymm6, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpmaxsw %ymm0, %ymm2, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsubw %ymm7, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpor %ymm6, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm1, %ymm5, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubw %ymm0, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm3, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i16_signed_mem_mem:
@@ -675,14 +659,12 @@ define <32 x i16> @vec512_i16_signed_mem_mem(<32 x i16>* %a1_addr, <32 x i16>* %
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubw %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%a1 = load <32 x i16>, <32 x i16>* %a1_addr
@@ -707,113 +689,72 @@ define <32 x i16> @vec512_i16_signed_mem_mem(<32 x i16>* %a1_addr, <32 x i16>* %
define <64 x i8> @vec512_i8_signed_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_signed_reg_reg:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4
; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
-; AVX512F-NEXT: vpminsb %ymm3, %ymm2, %ymm6
-; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm7
-; AVX512F-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
-; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT: vpminsb %ymm2, %ymm3, %ymm6
; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
-; AVX512F-NEXT: vpsubb %ymm7, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT: vpsubb %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsubb %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm6, %ymm1, %ymm1
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm7, %ymm5, %ymm5
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm6, %ymm8, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm6, %ymm8, %ymm6
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm1, %ymm8, %ymm1
-; AVX512F-NEXT: vpackuswb %ymm6, %ymm1, %ymm1
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpor %ymm7, %ymm4, %ymm4
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT: vpsubb %ymm2, %ymm5, %ymm2
+; AVX512F-NEXT: vpsubb %ymm1, %ymm5, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512F-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512F-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm2, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm7
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm3, %ymm6
; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm7, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm8, %ymm6
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm8, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand %ymm1, %ymm8, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm6, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm8, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm3, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm5, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_reg_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
-; AVX512BW-NEXT: vpandq %zmm4, %zmm1, %zmm1
-; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
@@ -830,116 +771,72 @@ define <64 x i8> @vec512_i8_signed_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwin
define <64 x i8> @vec512_i8_unsigned_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_unsigned_reg_reg:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm4
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512F-NEXT: vpminub %ymm4, %ymm2, %ymm5
-; AVX512F-NEXT: vpcmpeqb %ymm5, %ymm2, %ymm3
-; AVX512F-NEXT: vpternlogq $15, %zmm3, %zmm3, %zmm3
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512F-NEXT: vpminub %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
; AVX512F-NEXT: vpminub %ymm1, %ymm0, %ymm6
; AVX512F-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm7
-; AVX512F-NEXT: vpternlogq $15, %zmm7, %zmm7, %zmm7
-; AVX512F-NEXT: vpmaxub %ymm4, %ymm2, %ymm4
+; AVX512F-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm5
; AVX512F-NEXT: vpmaxub %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT: vpmaxub %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsubb %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpsubb %ymm6, %ymm1, %ymm1
-; AVX512F-NEXT: vpsubb %ymm5, %ymm4, %ymm4
-; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm6, %ymm7, %ymm7
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm5, %ymm8, %ymm5
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm7 = ymm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm7, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm1, %ymm8, %ymm1
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpor %ymm6, %ymm3, %ymm3
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm3
-; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512F-NEXT: vpsubb %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpsubb %ymm1, %ymm4, %ymm4
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512F-NEXT: vpternlogq $216, %zmm5, %zmm1, %zmm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512F-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm4
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpminub %ymm4, %ymm2, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpcmpeqb %ymm5, %ymm2, %ymm3
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpminub %ymm2, %ymm3, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
; AVX512VL-FALLBACK-NEXT: vpminub %ymm1, %ymm0, %ymm6
; AVX512VL-FALLBACK-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm7
-; AVX512VL-FALLBACK-NEXT: vpcmpeqd %ymm8, %ymm8, %ymm8
-; AVX512VL-FALLBACK-NEXT: vpmaxub %ymm4, %ymm2, %ymm4
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm5
; AVX512VL-FALLBACK-NEXT: vpmaxub %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpmaxub %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm4, %ymm2, %ymm2
; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpternlogq $222, %ymm8, %ymm6, %ymm7
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm9 = ymm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm9, %ymm5
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm9, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm7 = ymm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm7, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpand %ymm1, %ymm9, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm1, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpternlogq $222, %ymm8, %ymm6, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm9, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm3, %ymm4, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm9, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm3, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm4, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $216, %zmm5, %zmm1, %zmm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_unsigned_reg_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpnleub %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminub %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxub %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
-; AVX512BW-NEXT: vpandq %zmm4, %zmm1, %zmm1
-; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%t3 = icmp ugt <64 x i8> %a1, %a2
@@ -958,116 +855,75 @@ define <64 x i8> @vec512_i8_unsigned_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounw
define <64 x i8> @vec512_i8_signed_mem_reg(<64 x i8>* %a1_addr, <64 x i8> %a2) nounwind {
; AVX512F-LABEL: vec512_i8_signed_mem_reg:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
-; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm5
-; AVX512F-NEXT: vpminsb %ymm3, %ymm2, %ymm6
-; AVX512F-NEXT: vpminsb %ymm0, %ymm1, %ymm7
-; AVX512F-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
-; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3
-; AVX512F-NEXT: vpmaxsb %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT: vpsubb %ymm7, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm4
+; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512F-NEXT: vpminsb %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vpminsb %ymm1, %ymm3, %ymm6
+; AVX512F-NEXT: vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vpsubb %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT: vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpsubb %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512F-NEXT: vpand %ymm6, %ymm0, %ymm0
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm7, %ymm5, %ymm5
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm6, %ymm8, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm6, %ymm8, %ymm6
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT: vpand %ymm0, %ymm8, %ymm0
-; AVX512F-NEXT: vpackuswb %ymm6, %ymm0, %ymm0
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpor %ymm7, %ymm4, %ymm4
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT: vpaddb %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT: vpsubb %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT: vpsubb %ymm0, %ymm5, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512F-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512F-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm2
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm2, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm0, %ymm1, %ymm7
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm2, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm0, %ymm1, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm7, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm3, %ymm6
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm8, %ymm6
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm8, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpand %ymm0, %ymm8, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm6, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm8, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm3, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm0, %ymm0
-; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm5, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm0, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_mem_reg:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT: vpcmpgtb %zmm0, %zmm1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsb %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpmaxsb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpsubb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0
-; AVX512BW-NEXT: vpandq %zmm4, %zmm0, %zmm0
-; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %zmm0, %zmm2, %zmm0 {%k1}
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%a1 = load <64 x i8>, <64 x i8>* %a1_addr
@@ -1085,90 +941,60 @@ define <64 x i8> @vec512_i8_signed_mem_reg(<64 x i8>* %a1_addr, <64 x i8> %a2) n
define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, <64 x i8>* %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i8_signed_reg_mem:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
-; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
-; AVX512F-NEXT: vpminsb %ymm3, %ymm1, %ymm6
-; AVX512F-NEXT: vpminsb %ymm2, %ymm0, %ymm7
-; AVX512F-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
-; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3
-; AVX512F-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
-; AVX512F-NEXT: vpsubb %ymm7, %ymm2, %ymm2
-; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512F-NEXT: vpminsb %ymm1, %ymm0, %ymm5
+; AVX512F-NEXT: vpminsb %ymm2, %ymm3, %ymm6
+; AVX512F-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT: vpsubb %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsubb %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
-; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm7, %ymm5, %ymm5
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm6, %ymm8, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm6, %ymm8, %ymm6
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2
-; AVX512F-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpor %ymm7, %ymm4, %ymm4
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT: vpsubb %ymm2, %ymm5, %ymm2
+; AVX512F-NEXT: vpsubb %ymm1, %ymm5, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512F-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512F-NEXT: vpaddb %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
-; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm1, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm0, %ymm7
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm7, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm1
+; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm3, %ymm6
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm3, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm8, %ymm6
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm8, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpand %ymm2, %ymm8, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm8, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm2, %ymm5, %ymm2
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512VL-FALLBACK-NEXT: vpternlogq $226, %zmm1, %zmm4, %zmm2
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm2, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm2, %ymm0
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
@@ -1177,24 +1003,13 @@ define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, <64 x i8>* %a2_addr) n
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
-; AVX512BW-NEXT: vpandq %zmm4, %zmm1, %zmm1
-; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%a2 = load <64 x i8>, <64 x i8>* %a2_addr
@@ -1212,94 +1027,64 @@ define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, <64 x i8>* %a2_addr) n
define <64 x i8> @vec512_i8_signed_mem_mem(<64 x i8>* %a1_addr, <64 x i8>* %a2_addr) nounwind {
; AVX512F-LABEL: vec512_i8_signed_mem_mem:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rsi), %ymm2
-; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
-; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
-; AVX512F-NEXT: vpminsb %ymm3, %ymm1, %ymm6
-; AVX512F-NEXT: vpminsb %ymm2, %ymm0, %ymm7
-; AVX512F-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
-; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3
-; AVX512F-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
-; AVX512F-NEXT: vpsubb %ymm7, %ymm2, %ymm2
-; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
-; AVX512F-NEXT: vpsrlw $1, %ymm2, %ymm2
-; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpor %ymm7, %ymm5, %ymm5
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm6, %ymm8, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm6, %ymm8, %ymm6
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2
-; AVX512F-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpor %ymm7, %ymm4, %ymm4
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vpand %ymm3, %ymm8, %ymm3
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
+; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
+; AVX512F-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm4
+; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512F-NEXT: vpminsb %ymm0, %ymm2, %ymm5
+; AVX512F-NEXT: vpminsb %ymm1, %ymm3, %ymm6
+; AVX512F-NEXT: vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vpsubb %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT: vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT: vpsubb %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512F-NEXT: vpsubb %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT: vpsubb %ymm0, %ymm5, %ymm5
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512F-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512F-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %ymm2
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rsi), %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm3, %ymm1, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpminsb %ymm2, %ymm0, %ymm7
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm3, %ymm1, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm2, %ymm0, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsubb %ymm7, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm8, %ymm6
-; AVX512VL-FALLBACK-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-FALLBACK-NEXT: vpand %ymm6, %ymm8, %ymm6
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm5, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpand %ymm2, %ymm8, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpor %ymm7, %ymm4, %ymm4
-; AVX512VL-FALLBACK-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm6, %ymm5, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpand %ymm5, %ymm8, %ymm5
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512VL-FALLBACK-NEXT: vpmullw %ymm4, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpand %ymm3, %ymm8, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm1, %ymm3, %ymm1
-; AVX512VL-FALLBACK-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %ymm0
+; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rsi), %ymm1
+; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %ymm2
+; AVX512VL-FALLBACK-NEXT: vmovdqa 32(%rdi), %ymm3
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm1, %ymm3, %ymm4
+; AVX512VL-FALLBACK-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm0, %ymm2, %ymm5
+; AVX512VL-FALLBACK-NEXT: vpminsb %ymm1, %ymm3, %ymm6
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm5, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm6, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm1, %ymm5, %ymm1
+; AVX512VL-FALLBACK-NEXT: vpsubb %ymm0, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512VL-FALLBACK-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm1
+; AVX512VL-FALLBACK-NEXT: vextracti64x4 $1, %zmm1, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm3, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT: vpaddb %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-LABEL: vec512_i8_signed_mem_mem:
@@ -1307,24 +1092,13 @@ define <64 x i8> @vec512_i8_signed_mem_mem(<64 x i8>* %a1_addr, <64 x i8>* %a2_a
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-NEXT: vpminsb %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmaxsb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $1, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512BW-NEXT: vpandq %zmm4, %zmm2, %zmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
-; AVX512BW-NEXT: vpandq %zmm4, %zmm1, %zmm1
-; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm1 {%k1}
; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%a1 = load <64 x i8>, <64 x i8>* %a1_addr
diff --git a/llvm/test/CodeGen/X86/vector-bo-select.ll b/llvm/test/CodeGen/X86/vector-bo-select.ll
index 8677197e287e9..04e65165e5425 100644
--- a/llvm/test/CodeGen/X86/vector-bo-select.ll
+++ b/llvm/test/CodeGen/X86/vector-bo-select.ll
@@ -1083,9 +1083,8 @@ define <4 x i32> @mul_v4i32(<4 x i1> %b, <4 x i32> noundef %x, <4 x i32> noundef
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k1
-; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm0 = [1,1,1,1]
-; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1}
-; AVX512VL-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT: vpmulld %xmm2, %xmm1, %xmm1 {%k1}
+; AVX512VL-NEXT: vmovdqa %xmm1, %xmm0
; AVX512VL-NEXT: retq
%s = select <4 x i1> %b, <4 x i32> %y, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
%r = mul <4 x i32> %x, %s
@@ -1118,9 +1117,8 @@ define <8 x i32> @mul_v8i32_commute(<8 x i1> %b, <8 x i32> noundef %x, <8 x i32>
; AVX512VL-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k1
-; AVX512VL-NEXT: vpbroadcastd {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT: vmovdqa32 %ymm2, %ymm0 {%k1}
-; AVX512VL-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmulld %ymm2, %ymm1, %ymm1 {%k1}
+; AVX512VL-NEXT: vmovdqa %ymm1, %ymm0
; AVX512VL-NEXT: retq
%s = select <8 x i1> %b, <8 x i32> %y, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%r = mul <8 x i32> %s, %x
@@ -1152,9 +1150,7 @@ define <8 x i32> @mul_v8i32_cast_cond(i8 noundef zeroext %pb, <8 x i32> noundef
; AVX512VL-LABEL: mul_v8i32_cast_cond:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: kmovw %edi, %k1
-; AVX512VL-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT: vmovdqa32 %ymm1, %ymm2 {%k1}
-; AVX512VL-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmulld %ymm1, %ymm0, %ymm0 {%k1}
; AVX512VL-NEXT: retq
%b = bitcast i8 %pb to <8 x i1>
%s = select <8 x i1> %b, <8 x i32> %y, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
@@ -1197,16 +1193,14 @@ define <8 x i64> @mul_v8i64_cast_cond(i8 noundef zeroext %pb, <8 x i64> noundef
; AVX512-LABEL: mul_v8i64_cast_cond:
; AVX512: # %bb.0:
; AVX512-NEXT: kmovw %edi, %k1
-; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm2 = [1,1,1,1,1,1,1,1]
-; AVX512-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1}
-; AVX512-NEXT: vpsrlq $32, %zmm0, %zmm1
-; AVX512-NEXT: vpmuludq %zmm2, %zmm1, %zmm1
-; AVX512-NEXT: vpsrlq $32, %zmm2, %zmm3
-; AVX512-NEXT: vpmuludq %zmm3, %zmm0, %zmm3
-; AVX512-NEXT: vpaddq %zmm1, %zmm3, %zmm1
-; AVX512-NEXT: vpsllq $32, %zmm1, %zmm1
-; AVX512-NEXT: vpmuludq %zmm2, %zmm0, %zmm0
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $32, %zmm1, %zmm2
+; AVX512-NEXT: vpmuludq %zmm2, %zmm0, %zmm2
+; AVX512-NEXT: vpsrlq $32, %zmm0, %zmm3
+; AVX512-NEXT: vpmuludq %zmm1, %zmm3, %zmm3
+; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpsllq $32, %zmm2, %zmm2
+; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm1
+; AVX512-NEXT: vpaddq %zmm2, %zmm1, %zmm0 {%k1}
; AVX512-NEXT: retq
%b = bitcast i8 %pb to <8 x i1>
%s = select <8 x i1> %b, <8 x i64> %y, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
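For readers skimming the diff, the shape the new ISD::MUL case targets is the one exercised by the vector-bo-select.ll tests above: a select between a value and a splat of the multiplicative identity 1, feeding a multiply. A minimal self-contained IR reproducer (a sketch assembled from the test pattern above; the function name @mul_select_identity is illustrative, not taken from the tests) is:

define <4 x i32> @mul_select_identity(<4 x i1> %b, <4 x i32> %x, <4 x i32> %y) {
  ; false lanes select the identity constant 1
  %s = select <4 x i1> %b, <4 x i32> %y, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  ; x * 1 == x on those lanes, so the combine can sink the select past the
  ; multiply and fold it into a predicated operation
  %r = mul <4 x i32> %x, %s
  ret <4 x i32> %r
}

Compiled with something like llc -mtriple=x86_64 -mattr=+avx512vl, this should now select to a masked vpmulld under %k1, as in the mul_v4i32 hunk above, instead of broadcasting the <1,1,1,1> splat, blending it with vmovdqa32, and multiplying unmasked.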