[llvm] a9f5a44 - [X86] Regenerate test checks with vpternlog comments
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 9 03:55:18 PDT 2024
Author: Simon Pilgrim
Date: 2024-10-09T11:47:43+01:00
New Revision: a9f5a44aa0032c6efb262d2d5f79847045e525f1
URL: https://github.com/llvm/llvm-project/commit/a9f5a44aa0032c6efb262d2d5f79847045e525f1
DIFF: https://github.com/llvm/llvm-project/commit/a9f5a44aa0032c6efb262d2d5f79847045e525f1.diff
LOG: [X86] Regenerate test checks with vpternlog comments
Added:
Modified:
llvm/test/CodeGen/X86/avx512vl-logic.ll
llvm/test/CodeGen/X86/vec_smulo.ll
llvm/test/CodeGen/X86/vec_umulo.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
llvm/test/CodeGen/X86/vector-lzcnt-512.ll
llvm/test/CodeGen/X86/vector-tzcnt-512.ll
Removed:
################################################################################
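(Context note, not part of the commit itself.) The new CHECK lines were presumably produced by re-running llvm/utils/update_llc_test_checks.py now that the printed assembly carries a comment spelling out the boolean function encoded by the vpternlog immediate. A minimal Python sketch of that mapping, assuming the usual truth-table encoding (the result bit is imm8's bit at index (A<<2)|(B<<1)|C, where A is the destination/first source in Intel operand order, i.e. the last operand in the AT&T syntax used in these tests):

    from itertools import product

    def ternlog_table(imm8):
        """Truth table {(a, b, c): result_bit} encoded by an 8-bit ternlog immediate."""
        return {(a, b, c): (imm8 >> ((a << 2) | (b << 1) | c)) & 1
                for a, b, c in product((0, 1), repeat=3)}

    def matches(imm8, expr):
        """Check that a candidate boolean expression reproduces the immediate."""
        return all((expr(a, b, c) & 1) == bit
                   for (a, b, c), bit in ternlog_table(imm8).items())

    # Old: vpternlogd $8, %xmm1, %xmm2, %xmm0    New: xmm0 = xmm2 & xmm1 & ~xmm0
    assert matches(8, lambda a, b, c: b & c & (1 - a))

    # Old: vpternlogq $202, %ymm1, %ymm2, %ymm3  New: ymm3 = ymm1 ^ (ymm3 & (ymm2 ^ ymm1))
    assert matches(202, lambda a, b, c: c ^ (a & (b ^ c)))

    # Old: vpternlogq $15, %xmm0, %xmm0, %xmm0   New: xmm0 = ~xmm0
    assert matches(15, lambda a, b, c: 1 - a)

    # Old: vpternlogd $255, ... {%k1} {z}        New: zmm0 {%k1} {z} = -1 (all ones)
    assert matches(255, lambda a, b, c: 1)

The examples mirror immediates that appear in the diff below; the decoded expressions make it much easier to see what each folded ternlog actually computes than the raw $imm form did.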
diff --git a/llvm/test/CodeGen/X86/avx512vl-logic.ll b/llvm/test/CodeGen/X86/avx512vl-logic.ll
index 58621967e2aca6..284a0eb33047c4 100644
--- a/llvm/test/CodeGen/X86/avx512vl-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-logic.ll
@@ -958,7 +958,7 @@ entry:
define <4 x i32> @ternlog_and_andn(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; CHECK-LABEL: ternlog_and_andn:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $8, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = xmm2 & xmm1 & ~xmm0
; CHECK-NEXT: retq
%a = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%b = and <4 x i32> %y, %a
@@ -969,7 +969,7 @@ define <4 x i32> @ternlog_and_andn(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
define <4 x i32> @ternlog_or_andn(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; CHECK-LABEL: ternlog_or_andn:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $206, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = (xmm1 & ~xmm0) | xmm2
; CHECK-NEXT: retq
%a = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%b = and <4 x i32> %y, %a
@@ -980,7 +980,7 @@ define <4 x i32> @ternlog_or_andn(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
define <4 x i32> @ternlog_and_orn(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; CHECK-LABEL: ternlog_and_orn:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $176, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = xmm0 & (xmm1 | ~xmm2)
; CHECK-NEXT: retq
%a = xor <4 x i32> %z, <i32 -1, i32 -1, i32 -1, i32 -1>
%b = or <4 x i32> %a, %y
@@ -991,7 +991,7 @@ define <4 x i32> @ternlog_and_orn(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
define <4 x i32> @ternlog_and_orn_2(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; CHECK-LABEL: ternlog_and_orn_2:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $208, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = xmm0 & (xmm1 | ~xmm2)
; CHECK-NEXT: retq
%a = xor <4 x i32> %z, <i32 -1, i32 -1, i32 -1, i32 -1>
%b = or <4 x i32> %y, %a
@@ -1006,7 +1006,7 @@ define <4 x i32> @ternlog_orn_and(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vpand %xmm2, %xmm1, %xmm1
-; CHECK-NEXT: vpternlogd $222, %xmm3, %xmm1, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = xmm1 | (xmm0 ^ xmm3)
; CHECK-NEXT: retq
%a = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%b = and <4 x i32> %y, %z
@@ -1017,7 +1017,7 @@ define <4 x i32> @ternlog_orn_and(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
define <4 x i32> @ternlog_orn_and_2(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; CHECK-LABEL: ternlog_orn_and_2:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $143, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = (xmm1 & xmm2) | ~xmm0
; CHECK-NEXT: retq
%a = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%b = and <4 x i32> %y, %z
@@ -1028,7 +1028,7 @@ define <4 x i32> @ternlog_orn_and_2(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
define <4 x i32> @ternlog_xor_andn(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; CHECK-LABEL: ternlog_xor_andn:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $198, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = xmm2 ^ (xmm1 & ~xmm0)
; CHECK-NEXT: retq
%a = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%b = and <4 x i32> %y, %a
@@ -1039,7 +1039,7 @@ define <4 x i32> @ternlog_xor_andn(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
define <4 x i32> @ternlog_or_and_mask(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: ternlog_or_and_mask:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = (xmm0 & mem) | xmm1
; CHECK-NEXT: retq
%a = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
%b = or <4 x i32> %a, %y
@@ -1049,7 +1049,7 @@ define <4 x i32> @ternlog_or_and_mask(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @ternlog_or_and_mask_ymm(<8 x i32> %x, <8 x i32> %y) {
; CHECK-LABEL: ternlog_or_and_mask_ymm:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; CHECK-NEXT: vpternlogd {{.*#+}} ymm0 = (ymm0 & mem) | ymm1
; CHECK-NEXT: retq
%a = and <8 x i32> %x, <i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216>
%b = or <8 x i32> %a, %y
@@ -1059,7 +1059,7 @@ define <8 x i32> @ternlog_or_and_mask_ymm(<8 x i32> %x, <8 x i32> %y) {
define <2 x i64> @ternlog_xor_and_mask(<2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: ternlog_xor_and_mask:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm0
+; CHECK-NEXT: vpternlogq {{.*#+}} xmm0 = xmm1 ^ (xmm0 & mem)
; CHECK-NEXT: retq
%a = and <2 x i64> %x, <i64 1099511627775, i64 1099511627775>
%b = xor <2 x i64> %a, %y
@@ -1069,7 +1069,7 @@ define <2 x i64> @ternlog_xor_and_mask(<2 x i64> %x, <2 x i64> %y) {
define <4 x i64> @ternlog_xor_and_mask_ymm(<4 x i64> %x, <4 x i64> %y) {
; CHECK-LABEL: ternlog_xor_and_mask_ymm:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
+; CHECK-NEXT: vpternlogq {{.*#+}} ymm0 = ymm1 ^ (ymm0 & mem)
; CHECK-NEXT: retq
%a = and <4 x i64> %x, <i64 72057594037927935, i64 72057594037927935, i64 72057594037927935, i64 72057594037927935>
%b = xor <4 x i64> %a, %y
@@ -1081,7 +1081,7 @@ define <4 x i32> @ternlog_maskz_or_and_mask(<4 x i32> %x, <4 x i32> %y, <4 x i32
; CHECK: ## %bb.0:
; CHECK-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2
; CHECK-NEXT: vpsrad $31, %xmm3, %xmm0
-; CHECK-NEXT: vpternlogd $224, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = xmm0 & (xmm2 | xmm1)
; CHECK-NEXT: retq
%m = icmp slt <4 x i32> %mask, zeroinitializer
%a = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
@@ -1095,7 +1095,7 @@ define <8 x i32> @ternlog_maskz_or_and_mask_ymm(<8 x i32> %x, <8 x i32> %y, <8 x
; CHECK: ## %bb.0:
; CHECK-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm3
; CHECK-NEXT: vpsrad $31, %ymm2, %ymm0
-; CHECK-NEXT: vpternlogd $224, %ymm1, %ymm3, %ymm0
+; CHECK-NEXT: vpternlogd {{.*#+}} ymm0 = ymm0 & (ymm3 | ymm1)
; CHECK-NEXT: retq
%m = icmp slt <8 x i32> %mask, zeroinitializer
%a = and <8 x i32> %x, <i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216>
@@ -1109,7 +1109,7 @@ define <2 x i64> @ternlog_maskz_xor_and_mask(<2 x i64> %x, <2 x i64> %y, <2 x i6
; CHECK: ## %bb.0:
; CHECK-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm3
; CHECK-NEXT: vpsraq $63, %xmm2, %xmm0
-; CHECK-NEXT: vpternlogq $96, %xmm1, %xmm3, %xmm0
+; CHECK-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 & (xmm3 ^ xmm1)
; CHECK-NEXT: retq
%m = icmp slt <2 x i64> %mask, zeroinitializer
%a = and <2 x i64> %x, <i64 1099511627775, i64 1099511627775>
@@ -1123,7 +1123,7 @@ define <4 x i64> @ternlog_maskz_xor_and_mask_ymm(<4 x i64> %x, <4 x i64> %y, <4
; CHECK: ## %bb.0:
; CHECK-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm3
; CHECK-NEXT: vpsraq $63, %ymm2, %ymm0
-; CHECK-NEXT: vpternlogq $96, %ymm1, %ymm3, %ymm0
+; CHECK-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 & (ymm3 ^ ymm1)
; CHECK-NEXT: retq
%m = icmp slt <4 x i64> %mask, zeroinitializer
%a = and <4 x i64> %x, <i64 72057594037927935, i64 72057594037927935, i64 72057594037927935, i64 72057594037927935>
@@ -1317,7 +1317,7 @@ define <4 x i64> @ternlog_masky_xor_and_mask_ymm(<4 x i64> %x, <4 x i64> %y, <4
define <4 x i32> @ternlog_andn_or(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; CHECK-LABEL: ternlog_andn_or:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $14, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = ~xmm0 & (xmm1 | xmm2)
; CHECK-NEXT: retq
%a = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%b = or <4 x i32> %y, %z
@@ -1328,7 +1328,7 @@ define <4 x i32> @ternlog_andn_or(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
define <4 x i32> @ternlog_andn_or_2(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z) {
; CHECK-LABEL: ternlog_andn_or_2:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vpternlogd $16, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT: vpternlogd {{.*#+}} xmm0 = xmm0 & ~(xmm1 | xmm2)
; CHECK-NEXT: retq
%a = or <4 x i32> %y, %z
%b = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
diff --git a/llvm/test/CodeGen/X86/vec_smulo.ll b/llvm/test/CodeGen/X86/vec_smulo.ll
index c06fc5b6b6f6a8..50cbb14d52427d 100644
--- a/llvm/test/CodeGen/X86/vec_smulo.ll
+++ b/llvm/test/CodeGen/X86/vec_smulo.ll
@@ -139,7 +139,7 @@ define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, ptr %p2) nounwind {
; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
; AVX512-NEXT: vpsrad $31, %xmm2, %xmm0
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm0 = ~xmm0
; AVX512-NEXT: vmovq %xmm2, (%rdi)
; AVX512-NEXT: retq
%t = call {<2 x i32>, <2 x i1>} @llvm.smul.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1)
@@ -1234,7 +1234,7 @@ define <16 x i32> @smulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, ptr %p2) nounwin
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm1
; AVX512-NEXT: vpsrad $31, %zmm1, %zmm0
; AVX512-NEXT: vpcmpneqd %zmm0, %zmm4, %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512-NEXT: vmovdqa64 %zmm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<16 x i32>, <16 x i1>} @llvm.smul.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1)
@@ -1443,7 +1443,7 @@ define <16 x i32> @smulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpsraw $15, %ymm2, %ymm2
; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT: vpcmpneqd %zmm0, %zmm2, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, (%rdi)
; AVX512F-NEXT: retq
@@ -1457,7 +1457,7 @@ define <16 x i32> @smulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, ptr %p2) nounwind {
; AVX512BW-NEXT: vpsllw $8, %ymm1, %ymm2
; AVX512BW-NEXT: vpsraw $15, %ymm2, %ymm2
; AVX512BW-NEXT: vpcmpneqw %ymm0, %ymm2, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512BW-NEXT: vpmovwb %ymm1, (%rdi)
; AVX512BW-NEXT: retq
%t = call {<16 x i8>, <16 x i1>} @llvm.smul.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1)
@@ -1853,8 +1853,8 @@ define <32 x i32> @smulo_v32i8(<32 x i8> %a0, <32 x i8> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpsraw $15, %ymm1, %ymm1
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vpcmpneqd %zmm0, %zmm1, %k2
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
-; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm0 {%k2} {z} = -1
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovdb %zmm2, 16(%rdi)
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
@@ -1870,9 +1870,9 @@ define <32 x i32> @smulo_v32i8(<32 x i8> %a0, <32 x i8> %a1, ptr %p2) nounwind {
; AVX512BW-NEXT: vpsllw $8, %zmm2, %zmm1
; AVX512BW-NEXT: vpsraw $15, %zmm1, %zmm1
; AVX512BW-NEXT: vpcmpneqw %zmm0, %zmm1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
; AVX512BW-NEXT: vpmovwb %zmm2, (%rdi)
; AVX512BW-NEXT: retq
%t = call {<32 x i8>, <32 x i1>} @llvm.smul.with.overflow.v32i8(<32 x i8> %a0, <32 x i8> %a1)
@@ -2637,10 +2637,10 @@ define <64 x i32> @smulo_v64i8(<64 x i8> %a0, <64 x i8> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpsraw $15, %ymm1, %ymm1
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vpcmpneqd %zmm0, %zmm1, %k4
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k4} {z}
-; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k3} {z}
-; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
-; AVX512F-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k1} {z}
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm0 {%k4} {z} = -1
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 {%k3} {z} = -1
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm2 {%k2} {z} = -1
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm3 {%k1} {z} = -1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
; AVX512F-NEXT: vpmovdb %zmm4, 48(%rdi)
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
@@ -2670,13 +2670,13 @@ define <64 x i32> @smulo_v64i8(<64 x i8> %a0, <64 x i8> %a1, ptr %p2) nounwind {
; AVX512BW-NEXT: vpmovb2m %zmm4, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
; AVX512BW-NEXT: vpcmpneqb %zmm1, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512BW-NEXT: kshiftrd $16, %k1, %k2
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 {%k2} {z} = -1
; AVX512BW-NEXT: kshiftrq $32, %k1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm2 {%k1} {z} = -1
; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm3 {%k1} {z} = -1
; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rdi)
; AVX512BW-NEXT: retq
%t = call {<64 x i8>, <64 x i1>} @llvm.smul.with.overflow.v64i8(<64 x i8> %a0, <64 x i8> %a1)
@@ -2770,7 +2770,7 @@ define <8 x i32> @smulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpmullw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsraw $15, %xmm1, %xmm0
; AVX512F-NEXT: vpcmpeqw %xmm0, %xmm2, %xmm0
-; AVX512F-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: vpternlogq {{.*#+}} xmm0 = ~xmm0
; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k1
; AVX512F-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/vec_umulo.ll b/llvm/test/CodeGen/X86/vec_umulo.ll
index 6311678924d06a..4d7d2573183e07 100644
--- a/llvm/test/CodeGen/X86/vec_umulo.ll
+++ b/llvm/test/CodeGen/X86/vec_umulo.ll
@@ -113,7 +113,7 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, ptr %p2) nounwind {
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm0 = ~xmm0
; AVX512-NEXT: vmovq %xmm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1)
@@ -1028,7 +1028,7 @@ define <16 x i32> @umulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, ptr %p2) nounwin
; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vptestmd %zmm4, %zmm4, %k1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512-NEXT: vmovdqa64 %zmm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<16 x i32>, <16 x i1>} @llvm.umul.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1)
@@ -1218,7 +1218,7 @@ define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, (%rdi)
; AVX512F-NEXT: retq
@@ -1230,7 +1230,7 @@ define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, ptr %p2) nounwind {
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm0
; AVX512BW-NEXT: vptestmw %ymm0, %ymm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512BW-NEXT: vpmovwb %ymm1, (%rdi)
; AVX512BW-NEXT: retq
%t = call {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1)
@@ -1589,8 +1589,8 @@ define <32 x i32> @umulo_v32i8(<32 x i8> %a0, <32 x i8> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k2
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
-; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm0 {%k2} {z} = -1
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovdb %zmm2, 16(%rdi)
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
@@ -1604,9 +1604,9 @@ define <32 x i32> @umulo_v32i8(<32 x i8> %a0, <32 x i8> %a1, ptr %p2) nounwind {
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm0
; AVX512BW-NEXT: vptestmw %zmm0, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
; AVX512BW-NEXT: vpmovwb %zmm2, (%rdi)
; AVX512BW-NEXT: retq
%t = call {<32 x i8>, <32 x i1>} @llvm.umul.with.overflow.v32i8(<32 x i8> %a0, <32 x i8> %a1)
@@ -2297,10 +2297,10 @@ define <64 x i32> @umulo_v64i8(<64 x i8> %a0, <64 x i8> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpsrlw $8, %ymm7, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k4
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k4} {z}
-; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k3} {z}
-; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
-; AVX512F-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k1} {z}
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm0 {%k4} {z} = -1
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 {%k3} {z} = -1
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm2 {%k2} {z} = -1
+; AVX512F-NEXT: vpternlogd {{.*#+}} zmm3 {%k1} {z} = -1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
; AVX512F-NEXT: vpmovdb %zmm4, 48(%rdi)
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
@@ -2328,13 +2328,13 @@ define <64 x i32> @umulo_v64i8(<64 x i8> %a0, <64 x i8> %a1, ptr %p2) nounwind {
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vptestmb %zmm0, %zmm0, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512BW-NEXT: kshiftrd $16, %k1, %k2
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 {%k2} {z} = -1
; AVX512BW-NEXT: kshiftrq $32, %k1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm2 {%k1} {z} = -1
; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k1} {z}
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm3 {%k1} {z} = -1
; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rdi)
; AVX512BW-NEXT: retq
%t = call {<64 x i8>, <64 x i1>} @llvm.umul.with.overflow.v64i8(<64 x i8> %a0, <64 x i8> %a1)
@@ -2428,7 +2428,7 @@ define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: vpternlogq {{.*#+}} xmm0 = ~xmm0
; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k1
; AVX512F-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
index f105e065866af8..1b30b0814330d4 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
@@ -985,7 +985,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqa (%rdi), %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX512-NEXT: vmovdqa %ymm0, %ymm3
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm1 ^ (ymm3 & (ymm2 ^ ymm1))
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -998,7 +998,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm1 ^ ymm2))
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -1008,7 +1008,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
; AVX512-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,6,7,4]
; AVX512-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm2 ^ (ymm0 & (ymm1 ^ ymm2))
; AVX512-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -1029,7 +1029,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm2
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm1 ^ (ymm3 & (ymm2 ^ ymm1))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -1042,7 +1042,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm1 ^ ymm2))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -1052,7 +1052,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,6,7,4]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm2 ^ (ymm0 & (ymm1 ^ ymm2))
; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -1073,7 +1073,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm3
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm1 ^ (ymm3 & (ymm2 ^ ymm1))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -1086,7 +1086,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm1 ^ ymm2))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -1096,7 +1096,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,6,7,4]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm2 ^ (ymm0 & (ymm1 ^ ymm2))
; AVX512DQ-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -1117,7 +1117,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm1 ^ (ymm3 & (ymm2 ^ ymm1))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -1130,7 +1130,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm1 ^ ymm2))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -1140,7 +1140,7 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,6,7,4]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm2 ^ (ymm0 & (ymm1 ^ ymm2))
; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -1810,7 +1810,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqa 128(%rdi), %ymm5
; AVX512-NEXT: vmovdqa 160(%rdi), %ymm6
; AVX512-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm5 ^ (ymm1 & (ymm6 ^ ymm5))
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
@@ -1822,7 +1822,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqa (%rdi), %ymm8
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm9
; AVX512-NEXT: vmovdqa %ymm0, %ymm3
-; AVX512-NEXT: vpternlogq $202, %ymm9, %ymm8, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm9 ^ (ymm3 & (ymm8 ^ ymm9))
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -1836,7 +1836,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm10, %zmm7
; AVX512-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512-NEXT: vpternlogq $202, %ymm6, %ymm5, %ymm10
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm10 = ymm6 ^ (ymm10 & (ymm5 ^ ymm6))
; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
@@ -1847,7 +1847,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-NEXT: vmovdqa %ymm12, %ymm13
-; AVX512-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm13
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm13 = ymm8 ^ (ymm13 & (ymm9 ^ ymm8))
; AVX512-NEXT: vpermq {{.*#+}} ymm14 = ymm13[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7,8,9],ymm14[10],ymm13[11,12],ymm14[13],ymm13[14,15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -1858,7 +1858,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,6,7,4]
; AVX512-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3],ymm11[4,5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm5 ^ (ymm12 & (ymm6 ^ ymm5))
; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm12[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7],ymm5[8],ymm12[9,10],ymm5[11],ymm12[12,13],ymm5[14],ymm12[15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
@@ -1867,7 +1867,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
-; AVX512-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm8 ^ (ymm0 & (ymm9 ^ ymm8))
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7],ymm2[8],ymm0[9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
; AVX512-NEXT: vpshufb %ymm6, %ymm0, %ymm0
@@ -1888,7 +1888,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm5
; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm6
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm5 ^ (ymm1 & (ymm6 ^ ymm5))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
@@ -1900,7 +1900,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm8
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm9, %ymm8, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm9 ^ (ymm3 & (ymm8 ^ ymm9))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -1914,7 +1914,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm10, %zmm7
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm6, %ymm5, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm6 ^ (ymm10 & (ymm5 ^ ymm6))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
@@ -1925,7 +1925,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm8 ^ (ymm13 & (ymm9 ^ ymm8))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm13[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7,8,9],ymm14[10],ymm13[11,12],ymm14[13],ymm13[14,15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -1936,7 +1936,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,6,7,4]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3],ymm11[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm5 ^ (ymm12 & (ymm6 ^ ymm5))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm12[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7],ymm5[8],ymm12[9,10],ymm5[11],ymm12[12,13],ymm5[14],ymm12[15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
@@ -1945,7 +1945,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm8 ^ (ymm0 & (ymm9 ^ ymm8))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7],ymm2[8],ymm0[9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
@@ -1966,7 +1966,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm5
; AVX512DQ-NEXT: vmovdqa 160(%rdi), %ymm6
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm5 ^ (ymm1 & (ymm6 ^ ymm5))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
@@ -1978,7 +1978,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm8
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm9
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm3
-; AVX512DQ-NEXT: vpternlogq $202, %ymm9, %ymm8, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm9 ^ (ymm3 & (ymm8 ^ ymm9))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -1992,7 +1992,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm10, %zmm7
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm5, %ymm10
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm10 = ymm6 ^ (ymm10 & (ymm5 ^ ymm6))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
@@ -2003,7 +2003,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vmovdqa %ymm12, %ymm13
-; AVX512DQ-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm13 = ymm8 ^ (ymm13 & (ymm9 ^ ymm8))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm14 = ymm13[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7,8,9],ymm14[10],ymm13[11,12],ymm14[13],ymm13[14,15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -2014,7 +2014,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,6,7,4]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm5 ^ (ymm12 & (ymm6 ^ ymm5))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm12[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7],ymm5[8],ymm12[9,10],ymm5[11],ymm12[12,13],ymm5[14],ymm12[15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
@@ -2023,7 +2023,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm8 ^ (ymm0 & (ymm9 ^ ymm8))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7],ymm2[8],ymm0[9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
; AVX512DQ-NEXT: vpshufb %ymm6, %ymm0, %ymm0
@@ -2044,7 +2044,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm6
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm5 ^ (ymm1 & (ymm6 ^ ymm5))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
@@ -2056,7 +2056,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm8
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm9, %ymm8, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm9 ^ (ymm3 & (ymm8 ^ ymm9))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -2070,7 +2070,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm10, %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm6, %ymm5, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm6 ^ (ymm10 & (ymm5 ^ ymm6))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
@@ -2081,7 +2081,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm8 ^ (ymm13 & (ymm9 ^ ymm8))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm13[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7,8,9],ymm14[10],ymm13[11,12],ymm14[13],ymm13[14,15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -2092,7 +2092,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,6,7,4]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm5 ^ (ymm12 & (ymm6 ^ ymm5))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm12[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7],ymm5[8],ymm12[9,10],ymm5[11],ymm12[12,13],ymm5[14],ymm12[15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
@@ -2101,7 +2101,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm8 ^ (ymm0 & (ymm9 ^ ymm8))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7],ymm2[8],ymm0[9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
@@ -3440,7 +3440,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqa64 224(%rdi), %ymm18
; AVX512-NEXT: vmovdqa64 192(%rdi), %ymm20
; AVX512-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm18, %ymm20, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm18 ^ (ymm1 & (ymm20 ^ ymm18))
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -3459,7 +3459,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqa64 320(%rdi), %ymm21
; AVX512-NEXT: vmovdqa64 352(%rdi), %ymm22
; AVX512-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm21 ^ (ymm8 & (ymm22 ^ ymm21))
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7],ymm8[8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13,14],ymm9[15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
@@ -3476,7 +3476,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqa64 128(%rdi), %ymm23
; AVX512-NEXT: vmovdqa 160(%rdi), %ymm11
; AVX512-NEXT: vmovdqa %ymm0, %ymm5
-; AVX512-NEXT: vpternlogq $202, %ymm23, %ymm11, %ymm5
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm5 = ymm23 ^ (ymm5 & (ymm11 ^ ymm23))
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7],ymm5[8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14],ymm12[15]
; AVX512-NEXT: vpshufb %ymm10, %ymm5, %ymm10
@@ -3488,7 +3488,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vmovdqa64 (%rdi), %ymm24
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm12
; AVX512-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512-NEXT: vpternlogq $202, %ymm12, %ymm24, %ymm10
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm10 = ymm12 ^ (ymm10 & (ymm24 ^ ymm12))
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm10[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1],ymm10[2,3],ymm1[4],ymm10[5,6],ymm1[7],ymm10[8],ymm1[9],ymm10[10,11],ymm1[12],ymm10[13,14],ymm1[15]
; AVX512-NEXT: vpshufb %ymm7, %ymm1, %ymm7
@@ -3502,7 +3502,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm17
; AVX512-NEXT: vmovdqa %ymm0, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm22 ^ (ymm2 & (ymm21 ^ ymm22))
; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7,8,9],ymm6[10],ymm2[11,12],ymm6[13],ymm2[14,15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
@@ -3516,7 +3516,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-NEXT: vmovdqa %ymm13, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm20 ^ (ymm2 & (ymm18 ^ ymm20))
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -3532,7 +3532,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm19
; AVX512-NEXT: vmovdqa %ymm0, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm11, %ymm23, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm11 ^ (ymm2 & (ymm23 ^ ymm11))
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7,8,9],ymm7[10],ymm2[11,12],ymm7[13],ymm2[14,15]
; AVX512-NEXT: vpshufb %ymm9, %ymm2, %ymm2
@@ -3541,7 +3541,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm2[5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
; AVX512-NEXT: vmovdqa %ymm13, %ymm6
-; AVX512-NEXT: vpternlogq $202, %ymm24, %ymm12, %ymm6
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm24 ^ (ymm6 & (ymm12 ^ ymm24))
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
; AVX512-NEXT: vpshufb %ymm4, %ymm6, %ymm4
@@ -3552,7 +3552,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,7,4]
; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512-NEXT: vpternlogq $226, %ymm23, %ymm13, %ymm11
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm11 = ymm23 ^ (ymm13 & (ymm11 ^ ymm23))
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm11[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm11[1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7],ymm3[8],ymm11[9,10],ymm3[11],ymm11[12,13],ymm3[14],ymm11[15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
@@ -3562,7 +3562,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-NEXT: vpternlogq $226, %ymm24, %ymm0, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm24 ^ (ymm0 & (ymm12 ^ ymm24))
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm12[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7],ymm4[8],ymm12[9,10],ymm4[11],ymm12[12,13],ymm4[14],ymm12[15]
; AVX512-NEXT: vpshufb %ymm11, %ymm4, %ymm4
@@ -3572,7 +3572,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm13
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm13 = ymm21 ^ (ymm13 & (ymm22 ^ ymm21))
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm13[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm13[1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7],ymm3[8],ymm13[9,10],ymm3[11],ymm13[12,13],ymm3[14],ymm13[15]
; AVX512-NEXT: vmovdqa64 %xmm25, %xmm4
@@ -3582,7 +3582,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpshufb %ymm11, %ymm3, %ymm3
; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm20 ^ (ymm0 & (ymm18 ^ ymm20))
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7],ymm4[8],ymm0[9,10],ymm4[11],ymm0[12,13],ymm4[14],ymm0[15]
; AVX512-NEXT: vpshufb %ymm11, %ymm0, %ymm0
@@ -3607,7 +3607,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa64 224(%rdi), %ymm18
; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm20
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm18, %ymm20, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm18 ^ (ymm1 & (ymm20 ^ ymm18))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -3626,7 +3626,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %ymm21
; AVX512-FCP-NEXT: vmovdqa64 352(%rdi), %ymm22
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm21 ^ (ymm8 & (ymm22 ^ ymm21))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7],ymm8[8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13,14],ymm9[15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
@@ -3643,7 +3643,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %ymm23
; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm11
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm23, %ymm11, %ymm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm23 ^ (ymm5 & (ymm11 ^ ymm23))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7],ymm5[8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14],ymm12[15]
; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm10
@@ -3655,7 +3655,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %ymm24
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm12
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm12, %ymm24, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm12 ^ (ymm10 & (ymm24 ^ ymm12))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm10[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1],ymm10[2,3],ymm1[4],ymm10[5,6],ymm1[7],ymm10[8],ymm1[9],ymm10[10,11],ymm1[12],ymm10[13,14],ymm1[15]
; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm7
@@ -3669,7 +3669,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm17
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm22 ^ (ymm2 & (ymm21 ^ ymm22))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7,8,9],ymm6[10],ymm2[11,12],ymm6[13],ymm2[14,15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
@@ -3683,7 +3683,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm13, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm20 ^ (ymm2 & (ymm18 ^ ymm20))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -3699,7 +3699,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm19
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm11, %ymm23, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm11 ^ (ymm2 & (ymm23 ^ ymm11))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7,8,9],ymm7[10],ymm2[11,12],ymm7[13],ymm2[14,15]
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm2
@@ -3708,7 +3708,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm2[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa %ymm13, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm12, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm24 ^ (ymm6 & (ymm12 ^ ymm24))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm4
@@ -3719,7 +3719,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,7,4]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm23, %ymm13, %ymm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm23 ^ (ymm13 & (ymm11 ^ ymm23))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm11[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm11[1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7],ymm3[8],ymm11[9,10],ymm3[11],ymm11[12,13],ymm3[14],ymm11[15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
@@ -3729,7 +3729,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm24, %ymm0, %ymm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm24 ^ (ymm0 & (ymm12 ^ ymm24))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm12[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7],ymm4[8],ymm12[9,10],ymm4[11],ymm12[12,13],ymm4[14],ymm12[15]
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm4, %ymm4
@@ -3739,7 +3739,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm21 ^ (ymm13 & (ymm22 ^ ymm21))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm13[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm13[1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7],ymm3[8],ymm13[9,10],ymm3[11],ymm13[12,13],ymm3[14],ymm13[15]
; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm4
@@ -3749,7 +3749,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm20 ^ (ymm0 & (ymm18 ^ ymm20))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7],ymm4[8],ymm0[9,10],ymm4[11],ymm0[12,13],ymm4[14],ymm0[15]
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm0
@@ -3774,7 +3774,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa64 224(%rdi), %ymm18
; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %ymm20
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm18, %ymm20, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm18 ^ (ymm1 & (ymm20 ^ ymm18))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -3793,7 +3793,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %ymm21
; AVX512DQ-NEXT: vmovdqa64 352(%rdi), %ymm22
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm21 ^ (ymm8 & (ymm22 ^ ymm21))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7],ymm8[8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13,14],ymm9[15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
@@ -3810,7 +3810,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %ymm23
; AVX512DQ-NEXT: vmovdqa 160(%rdi), %ymm11
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm5
-; AVX512DQ-NEXT: vpternlogq $202, %ymm23, %ymm11, %ymm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm5 = ymm23 ^ (ymm5 & (ymm11 ^ ymm23))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7],ymm5[8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14],ymm12[15]
; AVX512DQ-NEXT: vpshufb %ymm10, %ymm5, %ymm10
@@ -3822,7 +3822,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vmovdqa64 (%rdi), %ymm24
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm12
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512DQ-NEXT: vpternlogq $202, %ymm12, %ymm24, %ymm10
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm10 = ymm12 ^ (ymm10 & (ymm24 ^ ymm12))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm10[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1],ymm10[2,3],ymm1[4],ymm10[5,6],ymm1[7],ymm10[8],ymm1[9],ymm10[10,11],ymm1[12],ymm10[13,14],ymm1[15]
; AVX512DQ-NEXT: vpshufb %ymm7, %ymm1, %ymm7
@@ -3836,7 +3836,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm17
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm22 ^ (ymm2 & (ymm21 ^ ymm22))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7,8,9],ymm6[10],ymm2[11,12],ymm6[13],ymm2[14,15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
@@ -3850,7 +3850,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vmovdqa %ymm13, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm20 ^ (ymm2 & (ymm18 ^ ymm20))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -3866,7 +3866,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm19
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm11, %ymm23, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm11 ^ (ymm2 & (ymm23 ^ ymm11))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7,8,9],ymm7[10],ymm2[11,12],ymm7[13],ymm2[14,15]
; AVX512DQ-NEXT: vpshufb %ymm9, %ymm2, %ymm2
@@ -3875,7 +3875,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm2[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa %ymm13, %ymm6
-; AVX512DQ-NEXT: vpternlogq $202, %ymm24, %ymm12, %ymm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm24 ^ (ymm6 & (ymm12 ^ ymm24))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
; AVX512DQ-NEXT: vpshufb %ymm4, %ymm6, %ymm4
@@ -3886,7 +3886,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,7,4]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512DQ-NEXT: vpternlogq $226, %ymm23, %ymm13, %ymm11
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm11 = ymm23 ^ (ymm13 & (ymm11 ^ ymm23))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm11[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm11[1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7],ymm3[8],ymm11[9,10],ymm3[11],ymm11[12,13],ymm3[14],ymm11[15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
@@ -3896,7 +3896,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm24, %ymm0, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm24 ^ (ymm0 & (ymm12 ^ ymm24))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm12[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7],ymm4[8],ymm12[9,10],ymm4[11],ymm12[12,13],ymm4[14],ymm12[15]
; AVX512DQ-NEXT: vpshufb %ymm11, %ymm4, %ymm4
@@ -3906,7 +3906,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm13 = ymm21 ^ (ymm13 & (ymm22 ^ ymm21))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm13[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm13[1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7],ymm3[8],ymm13[9,10],ymm3[11],ymm13[12,13],ymm3[14],ymm13[15]
; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm4
@@ -3916,7 +3916,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshufb %ymm11, %ymm3, %ymm3
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm20 ^ (ymm0 & (ymm18 ^ ymm20))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7],ymm4[8],ymm0[9,10],ymm4[11],ymm0[12,13],ymm4[14],ymm0[15]
; AVX512DQ-NEXT: vpshufb %ymm11, %ymm0, %ymm0
@@ -3941,7 +3941,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa64 224(%rdi), %ymm18
; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm20
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm18, %ymm20, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm18 ^ (ymm1 & (ymm20 ^ ymm18))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
@@ -3960,7 +3960,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %ymm21
; AVX512DQ-FCP-NEXT: vmovdqa64 352(%rdi), %ymm22
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm21 ^ (ymm8 & (ymm22 ^ ymm21))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7],ymm8[8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13,14],ymm9[15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
@@ -3977,7 +3977,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %ymm23
; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm11
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm23, %ymm11, %ymm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm23 ^ (ymm5 & (ymm11 ^ ymm23))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7],ymm5[8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14],ymm12[15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm10
@@ -3989,7 +3989,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %ymm24
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm12
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm12, %ymm24, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm12 ^ (ymm10 & (ymm24 ^ ymm12))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm10[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1],ymm10[2,3],ymm1[4],ymm10[5,6],ymm1[7],ymm10[8],ymm1[9],ymm10[10,11],ymm1[12],ymm10[13,14],ymm1[15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm7
@@ -4003,7 +4003,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm22 ^ (ymm2 & (ymm21 ^ ymm22))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7,8,9],ymm6[10],ymm2[11,12],ymm6[13],ymm2[14,15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
@@ -4017,7 +4017,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm13, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm20 ^ (ymm2 & (ymm18 ^ ymm20))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
@@ -4033,7 +4033,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm19
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm11, %ymm23, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm11 ^ (ymm2 & (ymm23 ^ ymm11))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7,8,9],ymm7[10],ymm2[11,12],ymm7[13],ymm2[14,15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm2
@@ -4042,7 +4042,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm2[5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm13, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm12, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm24 ^ (ymm6 & (ymm12 ^ ymm24))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm4
@@ -4053,7 +4053,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,7,4]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm23, %ymm13, %ymm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm23 ^ (ymm13 & (ymm11 ^ ymm23))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm11[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm11[1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7],ymm3[8],ymm11[9,10],ymm3[11],ymm11[12,13],ymm3[14],ymm11[15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
@@ -4063,7 +4063,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm24, %ymm0, %ymm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm24 ^ (ymm0 & (ymm12 ^ ymm24))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm12[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7],ymm4[8],ymm12[9,10],ymm4[11],ymm12[12,13],ymm4[14],ymm12[15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm4, %ymm4
@@ -4073,7 +4073,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm21 ^ (ymm13 & (ymm22 ^ ymm21))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm13[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm13[1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7],ymm3[8],ymm13[9,10],ymm3[11],ymm13[12,13],ymm3[14],ymm13[15]
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm25, %xmm4
@@ -4083,7 +4083,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm20 ^ (ymm0 & (ymm18 ^ ymm20))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7],ymm4[8],ymm0[9,10],ymm4[11],ymm0[12,13],ymm4[14],ymm0[15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
index faecad65c395b2..e4ddf5bc3a8af5 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
@@ -858,7 +858,7 @@ define void @load_i8_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX512-NEXT: vmovdqa %xmm0, (%rsi)
@@ -879,7 +879,7 @@ define void @load_i8_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX512-FCP-NEXT: vmovdqa %xmm0, (%rsi)
@@ -900,7 +900,7 @@ define void @load_i8_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX512DQ-NEXT: vmovdqa %xmm0, (%rsi)
@@ -921,7 +921,7 @@ define void @load_i8_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, (%rsi)
@@ -1412,7 +1412,7 @@ define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512-NEXT: # ymm4 = mem[0,1,0,1]
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512-NEXT: vmovdqa %ymm0, (%rsi)
@@ -1440,7 +1440,7 @@ define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rsi)
@@ -1468,7 +1468,7 @@ define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512DQ-NEXT: # ymm4 = mem[0,1,0,1]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512DQ-NEXT: vmovdqa %ymm0, (%rsi)
@@ -1496,7 +1496,7 @@ define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rsi)
@@ -2436,7 +2436,7 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512-NEXT: vpternlogq $202, %zmm2, %zmm8, %zmm5
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm5 & (zmm8 ^ zmm2))
; AVX512-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
@@ -2483,7 +2483,7 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512-FCP-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512-FCP-NEXT: vpternlogq $202, %zmm2, %zmm8, %zmm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm5 & (zmm8 ^ zmm2))
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
@@ -2530,7 +2530,7 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512DQ-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-NEXT: vpternlogq $202, %zmm2, %zmm8, %zmm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm5 & (zmm8 ^ zmm2))
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
@@ -2577,7 +2577,7 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512DQ-FCP-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %zmm2, %zmm8, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm5 & (zmm8 ^ zmm2))
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
index 43a45b9fd59a75..db8c74f2741c8f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
@@ -1641,7 +1641,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa (%rdi), %ymm4
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512-NEXT: vmovdqa %ymm1, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm5 ^ (ymm0 & (ymm4 ^ ymm5))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,5,10,15],zero,zero,zero,xmm0[3,8,13],zero,zero,zero,xmm0[u,u,u]
@@ -1653,7 +1653,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpor %xmm6, %xmm2, %xmm6
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535]
; AVX512-NEXT: vmovdqa %ymm2, %ymm7
-; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm7
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm4 ^ ymm5))
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[1,6,11],zero,zero,zero,zero,xmm7[4,9,14],zero,zero,zero,xmm7[u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm7
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13,u,u,u]
@@ -1662,7 +1662,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[2,7,12]
; AVX512-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
-; AVX512-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm4 ^ (ymm8 & (ymm5 ^ ymm4))
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[1,6,11],zero,zero,zero,zero,xmm9[4,9,14,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[2,7,12],zero,zero,zero,xmm8[0,5,10,15],zero,zero,zero,xmm8[u,u,u]
@@ -1670,7 +1670,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb %xmm3, %xmm8, %xmm8
; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[3,8,13]
; AVX512-NEXT: vpor %xmm9, %xmm8, %xmm8
-; AVX512-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm4 ^ (ymm1 & (ymm5 ^ ymm4))
; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15,u,u,u]
@@ -1678,7 +1678,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[4,9,14]
; AVX512-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX512-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm4 ^ (ymm2 & (ymm5 ^ ymm4))
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
@@ -1699,7 +1699,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm5 ^ (ymm0 & (ymm4 ^ ymm5))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,5,10,15],zero,zero,zero,xmm0[3,8,13],zero,zero,zero,xmm0[u,u,u]
@@ -1711,7 +1711,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpor %xmm6, %xmm2, %xmm6
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm7
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm4 ^ ymm5))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[1,6,11],zero,zero,zero,zero,xmm7[4,9,14],zero,zero,zero,xmm7[u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13,u,u,u]
@@ -1720,7 +1720,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[2,7,12]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm4 ^ (ymm8 & (ymm5 ^ ymm4))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[1,6,11],zero,zero,zero,zero,xmm9[4,9,14,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[2,7,12],zero,zero,zero,xmm8[0,5,10,15],zero,zero,zero,xmm8[u,u,u]
@@ -1728,7 +1728,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm8, %xmm8
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[3,8,13]
; AVX512-FCP-NEXT: vpor %xmm9, %xmm8, %xmm8
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm4 ^ (ymm1 & (ymm5 ^ ymm4))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15,u,u,u]
@@ -1736,7 +1736,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[4,9,14]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm4 ^ (ymm2 & (ymm5 ^ ymm4))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
@@ -1757,7 +1757,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm4
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512DQ-NEXT: vmovdqa %ymm1, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm5 ^ (ymm0 & (ymm4 ^ ymm5))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,5,10,15],zero,zero,zero,xmm0[3,8,13],zero,zero,zero,xmm0[u,u,u]
@@ -1769,7 +1769,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpor %xmm6, %xmm2, %xmm6
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535]
; AVX512DQ-NEXT: vmovdqa %ymm2, %ymm7
-; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm4 ^ ymm5))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[1,6,11],zero,zero,zero,zero,xmm7[4,9,14],zero,zero,zero,xmm7[u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm7
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13,u,u,u]
@@ -1778,7 +1778,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[2,7,12]
; AVX512DQ-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm4 ^ (ymm8 & (ymm5 ^ ymm4))
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[1,6,11],zero,zero,zero,zero,xmm9[4,9,14,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[2,7,12],zero,zero,zero,xmm8[0,5,10,15],zero,zero,zero,xmm8[u,u,u]
@@ -1786,7 +1786,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb %xmm3, %xmm8, %xmm8
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[3,8,13]
; AVX512DQ-NEXT: vpor %xmm9, %xmm8, %xmm8
-; AVX512DQ-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm4 ^ (ymm1 & (ymm5 ^ ymm4))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15,u,u,u]
@@ -1794,7 +1794,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[4,9,14]
; AVX512DQ-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm4 ^ (ymm2 & (ymm5 ^ ymm4))
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
@@ -1815,7 +1815,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm5 ^ (ymm0 & (ymm4 ^ ymm5))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,5,10,15],zero,zero,zero,xmm0[3,8,13],zero,zero,zero,xmm0[u,u,u]
@@ -1827,7 +1827,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm2, %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm4 ^ ymm5))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[1,6,11],zero,zero,zero,zero,xmm7[4,9,14],zero,zero,zero,xmm7[u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13,u,u,u]
@@ -1836,7 +1836,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[2,7,12]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm4 ^ (ymm8 & (ymm5 ^ ymm4))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm9
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[1,6,11],zero,zero,zero,zero,xmm9[4,9,14,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[2,7,12],zero,zero,zero,xmm8[0,5,10,15],zero,zero,zero,xmm8[u,u,u]
@@ -1844,7 +1844,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm8, %xmm8
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[3,8,13]
; AVX512DQ-FCP-NEXT: vpor %xmm9, %xmm8, %xmm8
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm4 ^ (ymm1 & (ymm5 ^ ymm4))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15,u,u,u]
@@ -1852,7 +1852,7 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[4,9,14]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm4 ^ (ymm2 & (ymm5 ^ ymm4))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
@@ -3183,20 +3183,20 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512-NEXT: vmovdqa 96(%rdi), %ymm1
; AVX512-NEXT: vmovdqa %ymm2, %ymm4
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm4))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,1,6,11,16,21,26,31,20,25,30,19,24,29,128,128,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm8, %ymm6, %ymm6
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
; AVX512-NEXT: vmovdqa %ymm4, %ymm7
-; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm3, %ymm7
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm3 ^ ymm5))
; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm9
; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,xmm9[4,9,14],zero,zero,zero,xmm9[2,7,12,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u]
; AVX512-NEXT: vpor %xmm7, %xmm9, %xmm9
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512-NEXT: vpternlogq $236, %ymm11, %ymm6, %ymm9
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = (ymm9 & ymm11) | ymm6
; AVX512-NEXT: vmovdqa 144(%rdi), %xmm7
; AVX512-NEXT: vpshufb %xmm8, %xmm7, %xmm6
; AVX512-NEXT: vmovdqa 128(%rdi), %xmm8
@@ -3207,18 +3207,18 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
; AVX512-NEXT: vmovdqa %ymm10, %ymm9
-; AVX512-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm9
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm0 ^ (ymm9 & (ymm1 ^ ymm0))
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm9[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (mem & (ymm12 ^ ymm9))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,2,7,12,17,22,27,16,21,26,31,20,25,30,128,128,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm9, %ymm12, %ymm12
; AVX512-NEXT: vmovdqa %ymm2, %ymm13
-; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm3, %ymm13
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm13 = ymm5 ^ (ymm13 & (ymm3 ^ ymm5))
; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[1,6,11],zero,zero,zero,zero,xmm13[4,9,14],zero,zero,zero,xmm13[u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm13, %xmm13
; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm13[0,5,10,15],zero,zero,zero,xmm13[3,8,13,u,u,u]
; AVX512-NEXT: vpor %xmm14, %xmm13, %xmm13
-; AVX512-NEXT: vpternlogq $236, %ymm11, %ymm12, %ymm13
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm13 = (ymm13 & ymm11) | ymm12
; AVX512-NEXT: vpshufb %xmm9, %xmm7, %xmm9
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
; AVX512-NEXT: vpor %xmm9, %xmm12, %xmm9
@@ -3226,17 +3226,17 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpblendw {{.*#+}} ymm9 = ymm13[0,1,2,3,4],ymm9[5,6,7],ymm13[8,9,10,11,12],ymm9[13,14,15]
; AVX512-NEXT: vpblendd {{.*#+}} ymm9 = ymm13[0,1,2,3],ymm9[4,5,6,7]
; AVX512-NEXT: vmovdqa %ymm4, %ymm12
-; AVX512-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm0 ^ (ymm12 & (ymm1 ^ ymm0))
; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm12[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm13
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (mem & (ymm13 ^ ymm12))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,3,8,13,18,23,28,17,22,27,16,21,26,31,128,128,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm12, %ymm13, %ymm13
-; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm10
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm10 = ymm3 ^ (ymm10 & (ymm5 ^ ymm3))
; AVX512-NEXT: vextracti128 $1, %ymm10, %xmm14
; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[1,6,11],zero,zero,zero,zero,xmm14[4,9,14,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,7,12],zero,zero,zero,xmm10[0,5,10,15],zero,zero,zero,xmm10[u,u,u]
; AVX512-NEXT: vpor %xmm14, %xmm10, %xmm10
-; AVX512-NEXT: vpternlogq $236, %ymm11, %ymm13, %ymm10
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm10 = (ymm10 & ymm11) | ymm13
; AVX512-NEXT: vpshufb %xmm12, %xmm7, %xmm11
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
; AVX512-NEXT: vpor %xmm11, %xmm12, %xmm11
@@ -3244,39 +3244,39 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpblendw {{.*#+}} ymm11 = ymm10[0,1,2,3,4],ymm11[5,6,7],ymm10[8,9,10,11,12],ymm11[13,14,15]
; AVX512-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512-NEXT: vmovdqa %ymm2, %ymm11
-; AVX512-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm11
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm11 = ymm0 ^ (ymm11 & (ymm1 ^ ymm0))
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (mem & (ymm12 ^ ymm11))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb %ymm11, %ymm12, %ymm12
; AVX512-NEXT: vmovdqa %ymm4, %ymm13
-; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm13
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm13 = ymm3 ^ (ymm13 & (ymm5 ^ ymm3))
; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[3,8,13],zero,zero,zero,xmm13[1,6,11],zero,zero,zero,zero,xmm13[u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm13, %xmm13
; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm13[2,7,12],zero,zero,zero,xmm13[0,5,10,15,u,u,u]
; AVX512-NEXT: vpor %xmm14, %xmm13, %xmm13
-; AVX512-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm13
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm13 = (ymm13 & mem) | ymm12
; AVX512-NEXT: vpshufb %xmm11, %xmm7, %xmm7
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
; AVX512-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX512-NEXT: vpmovsxwq {{.*#+}} ymm8 = [18446744073709551615,18446744073709551615,18446744073709551615,255]
-; AVX512-NEXT: vpternlogq $184, %ymm13, %ymm8, %ymm7
-; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (ymm8 & (ymm7 ^ ymm13))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (ymm2 & (ymm5 ^ ymm3))
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
; AVX512-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm4[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm4))
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
; AVX512-NEXT: vmovdqa 128(%rdi), %ymm1
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,5,0,5,0,5,0,5]
; AVX512-NEXT: vpermd %ymm1, %ymm2, %ymm1
-; AVX512-NEXT: vpternlogq $184, %ymm0, %ymm8, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (ymm8 & (ymm1 ^ ymm0))
; AVX512-NEXT: vmovdqa %ymm6, (%rsi)
; AVX512-NEXT: vmovdqa %ymm9, (%rdx)
; AVX512-NEXT: vmovdqa %ymm10, (%rcx)
@@ -3293,20 +3293,20 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm4
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm4))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,1,6,11,16,21,26,31,20,25,30,19,24,29,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm6
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm7
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm3, %ymm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm3 ^ ymm5))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm9
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,xmm9[4,9,14],zero,zero,zero,xmm9[2,7,12,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm9, %xmm9
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm11, %ymm6, %ymm9
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = (ymm9 & ymm11) | ymm6
; AVX512-FCP-NEXT: vmovdqa 144(%rdi), %xmm7
; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm6
; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %xmm8
@@ -3317,18 +3317,18 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm10, %ymm9
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm9
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm0 ^ (ymm9 & (ymm1 ^ ymm0))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm9[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (mem & (ymm12 ^ ymm9))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,2,7,12,17,22,27,16,21,26,31,20,25,30,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm12
; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm3, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm5 ^ (ymm13 & (ymm3 ^ ymm5))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[1,6,11],zero,zero,zero,zero,xmm13[4,9,14],zero,zero,zero,xmm13[u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm13
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm13[0,5,10,15],zero,zero,zero,xmm13[3,8,13,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm14, %xmm13, %xmm13
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm11, %ymm12, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = (ymm13 & ymm11) | ymm12
; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm9
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm9, %xmm12, %xmm9
@@ -3336,17 +3336,17 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm9 = ymm13[0,1,2,3,4],ymm9[5,6,7],ymm13[8,9,10,11,12],ymm9[13,14,15]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm13[0,1,2,3],ymm9[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm12
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm0 ^ (ymm12 & (ymm1 ^ ymm0))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm12[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (mem & (ymm13 ^ ymm12))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,3,8,13,18,23,28,17,22,27,16,21,26,31,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm12, %ymm13, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm3 ^ (ymm10 & (ymm5 ^ ymm3))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm14
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[1,6,11],zero,zero,zero,zero,xmm14[4,9,14,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,7,12],zero,zero,zero,xmm10[0,5,10,15],zero,zero,zero,xmm10[u,u,u]
; AVX512-FCP-NEXT: vpor %xmm14, %xmm10, %xmm10
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm11, %ymm13, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = (ymm10 & ymm11) | ymm13
; AVX512-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm11
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm11, %xmm12, %xmm11
@@ -3354,39 +3354,39 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm10[0,1,2,3,4],ymm11[5,6,7],ymm10[8,9,10,11,12],ymm11[13,14,15]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa %ymm2, %ymm11
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm0 ^ (ymm11 & (ymm1 ^ ymm0))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (mem & (ymm12 ^ ymm11))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm12, %ymm12
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm3 ^ (ymm13 & (ymm5 ^ ymm3))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[3,8,13],zero,zero,zero,xmm13[1,6,11],zero,zero,zero,zero,xmm13[u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm13
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm13[2,7,12],zero,zero,zero,xmm13[0,5,10,15,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm14, %xmm13, %xmm13
-; AVX512-FCP-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = (ymm13 & mem) | ymm12
; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX512-FCP-NEXT: vpmovsxwq {{.*#+}} ymm8 = [18446744073709551615,18446744073709551615,18446744073709551615,255]
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm13, %ymm8, %ymm7
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (ymm8 & (ymm7 ^ ymm13))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (ymm2 & (ymm5 ^ ymm3))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm4[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm4))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm1
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,5,0,5,0,5,0,5]
; AVX512-FCP-NEXT: vpermd %ymm1, %ymm2, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm0, %ymm8, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (ymm8 & (ymm1 ^ ymm0))
; AVX512-FCP-NEXT: vmovdqa %ymm6, (%rsi)
; AVX512-FCP-NEXT: vmovdqa %ymm9, (%rdx)
; AVX512-FCP-NEXT: vmovdqa %ymm10, (%rcx)
@@ -3403,20 +3403,20 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa 96(%rdi), %ymm1
; AVX512DQ-NEXT: vmovdqa %ymm2, %ymm4
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm4))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,1,6,11,16,21,26,31,20,25,30,19,24,29,128,128,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm8, %ymm6, %ymm6
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm7
-; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm3, %ymm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm3 ^ ymm5))
; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm9
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,xmm9[4,9,14],zero,zero,zero,xmm9[2,7,12,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u]
; AVX512DQ-NEXT: vpor %xmm7, %xmm9, %xmm9
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $236, %ymm11, %ymm6, %ymm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = (ymm9 & ymm11) | ymm6
; AVX512DQ-NEXT: vmovdqa 144(%rdi), %xmm7
; AVX512DQ-NEXT: vpshufb %xmm8, %xmm7, %xmm6
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %xmm8
@@ -3427,18 +3427,18 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
; AVX512DQ-NEXT: vmovdqa %ymm10, %ymm9
-; AVX512DQ-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm0 ^ (ymm9 & (ymm1 ^ ymm0))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm9[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (mem & (ymm12 ^ ymm9))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,2,7,12,17,22,27,16,21,26,31,20,25,30,128,128,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm9, %ymm12, %ymm12
; AVX512DQ-NEXT: vmovdqa %ymm2, %ymm13
-; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm3, %ymm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm13 = ymm5 ^ (ymm13 & (ymm3 ^ ymm5))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[1,6,11],zero,zero,zero,zero,xmm13[4,9,14],zero,zero,zero,xmm13[u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm13, %xmm13
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm13[0,5,10,15],zero,zero,zero,xmm13[3,8,13,u,u,u]
; AVX512DQ-NEXT: vpor %xmm14, %xmm13, %xmm13
-; AVX512DQ-NEXT: vpternlogq $236, %ymm11, %ymm12, %ymm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm13 = (ymm13 & ymm11) | ymm12
; AVX512DQ-NEXT: vpshufb %xmm9, %xmm7, %xmm9
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm9, %xmm12, %xmm9
@@ -3446,17 +3446,17 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm9 = ymm13[0,1,2,3,4],ymm9[5,6,7],ymm13[8,9,10,11,12],ymm9[13,14,15]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm9 = ymm13[0,1,2,3],ymm9[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm12
-; AVX512DQ-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm0 ^ (ymm12 & (ymm1 ^ ymm0))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm12[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (mem & (ymm13 ^ ymm12))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,3,8,13,18,23,28,17,22,27,16,21,26,31,128,128,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm12, %ymm13, %ymm13
-; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm10
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm10 = ymm3 ^ (ymm10 & (ymm5 ^ ymm3))
; AVX512DQ-NEXT: vextracti128 $1, %ymm10, %xmm14
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[1,6,11],zero,zero,zero,zero,xmm14[4,9,14,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,7,12],zero,zero,zero,xmm10[0,5,10,15],zero,zero,zero,xmm10[u,u,u]
; AVX512DQ-NEXT: vpor %xmm14, %xmm10, %xmm10
-; AVX512DQ-NEXT: vpternlogq $236, %ymm11, %ymm13, %ymm10
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm10 = (ymm10 & ymm11) | ymm13
; AVX512DQ-NEXT: vpshufb %xmm12, %xmm7, %xmm11
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm11, %xmm12, %xmm11
@@ -3464,39 +3464,39 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm11 = ymm10[0,1,2,3,4],ymm11[5,6,7],ymm10[8,9,10,11,12],ymm11[13,14,15]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa %ymm2, %ymm11
-; AVX512DQ-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm11
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm11 = ymm0 ^ (ymm11 & (ymm1 ^ ymm0))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (mem & (ymm12 ^ ymm11))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb %ymm11, %ymm12, %ymm12
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm13
-; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm13 = ymm3 ^ (ymm13 & (ymm5 ^ ymm3))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[3,8,13],zero,zero,zero,xmm13[1,6,11],zero,zero,zero,zero,xmm13[u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm13, %xmm13
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm13[2,7,12],zero,zero,zero,xmm13[0,5,10,15,u,u,u]
; AVX512DQ-NEXT: vpor %xmm14, %xmm13, %xmm13
-; AVX512DQ-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm13 = (ymm13 & mem) | ymm12
; AVX512DQ-NEXT: vpshufb %xmm11, %xmm7, %xmm7
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX512DQ-NEXT: vpmovsxwq {{.*#+}} ymm8 = [18446744073709551615,18446744073709551615,18446744073709551615,255]
-; AVX512DQ-NEXT: vpternlogq $184, %ymm13, %ymm8, %ymm7
-; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (ymm8 & (ymm7 ^ ymm13))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (ymm2 & (ymm5 ^ ymm3))
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm4[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm4))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm1
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,5,0,5,0,5,0,5]
; AVX512DQ-NEXT: vpermd %ymm1, %ymm2, %ymm1
-; AVX512DQ-NEXT: vpternlogq $184, %ymm0, %ymm8, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (ymm8 & (ymm1 ^ ymm0))
; AVX512DQ-NEXT: vmovdqa %ymm6, (%rsi)
; AVX512DQ-NEXT: vmovdqa %ymm9, (%rdx)
; AVX512DQ-NEXT: vmovdqa %ymm10, (%rcx)
@@ -3513,20 +3513,20 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm4
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm4))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,1,6,11,16,21,26,31,20,25,30,19,24,29,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm6
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm3, %ymm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm3 ^ ymm5))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm9
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,xmm9[4,9,14],zero,zero,zero,xmm9[2,7,12,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm9, %xmm9
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm11, %ymm6, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = (ymm9 & ymm11) | ymm6
; AVX512DQ-FCP-NEXT: vmovdqa 144(%rdi), %xmm7
; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %xmm8
@@ -3537,18 +3537,18 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm9[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm10, %ymm9
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm0 ^ (ymm9 & (ymm1 ^ ymm0))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm9[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (mem & (ymm12 ^ ymm9))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,2,7,12,17,22,27,16,21,26,31,20,25,30,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm12, %ymm12
; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm3, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm5 ^ (ymm13 & (ymm3 ^ ymm5))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[1,6,11],zero,zero,zero,zero,xmm13[4,9,14],zero,zero,zero,xmm13[u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm13
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm13[0,5,10,15],zero,zero,zero,xmm13[3,8,13,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm14, %xmm13, %xmm13
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm11, %ymm12, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = (ymm13 & ymm11) | ymm12
; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm7, %xmm9
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm9, %xmm12, %xmm9
@@ -3556,17 +3556,17 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm9 = ymm13[0,1,2,3,4],ymm9[5,6,7],ymm13[8,9,10,11,12],ymm9[13,14,15]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm9 = ymm13[0,1,2,3],ymm9[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm12
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm0 ^ (ymm12 & (ymm1 ^ ymm0))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm12[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (mem & (ymm13 ^ ymm12))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,3,8,13,18,23,28,17,22,27,16,21,26,31,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm12, %ymm13, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm3 ^ (ymm10 & (ymm5 ^ ymm3))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm14
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[1,6,11],zero,zero,zero,zero,xmm14[4,9,14,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,7,12],zero,zero,zero,xmm10[0,5,10,15],zero,zero,zero,xmm10[u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm14, %xmm10, %xmm10
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm11, %ymm13, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = (ymm10 & ymm11) | ymm13
; AVX512DQ-FCP-NEXT: vpshufb %xmm12, %xmm7, %xmm11
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm11, %xmm12, %xmm11
@@ -3574,39 +3574,39 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm10[0,1,2,3,4],ymm11[5,6,7],ymm10[8,9,10,11,12],ymm11[13,14,15]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, %ymm11
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm0 ^ (ymm11 & (ymm1 ^ ymm0))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm11, %ymm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (mem & (ymm12 ^ ymm11))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,128,128,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm12, %ymm12
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm3 ^ (ymm13 & (ymm5 ^ ymm3))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[3,8,13],zero,zero,zero,xmm13[1,6,11],zero,zero,zero,zero,xmm13[u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm13
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm13[2,7,12],zero,zero,zero,xmm13[0,5,10,15,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm14, %xmm13, %xmm13
-; AVX512DQ-FCP-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = (ymm13 & mem) | ymm12
; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm7, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX512DQ-FCP-NEXT: vpmovsxwq {{.*#+}} ymm8 = [18446744073709551615,18446744073709551615,18446744073709551615,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm13, %ymm8, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm5, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (ymm8 & (ymm7 ^ ymm13))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm3 ^ (ymm2 & (ymm5 ^ ymm3))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm4[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm4))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm2 = [0,5,0,5,0,5,0,5]
; AVX512DQ-FCP-NEXT: vpermd %ymm1, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm0, %ymm8, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (ymm8 & (ymm1 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm10, (%rcx)
@@ -6421,26 +6421,26 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa64 64(%rdi), %ymm21
; AVX512-NEXT: vmovdqa64 96(%rdi), %ymm22
; AVX512-NEXT: vmovdqa %ymm5, %ymm4
-; AVX512-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm4
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm22 ^ (ymm4 & (ymm21 ^ ymm22))
; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm4))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,1,6,11,16,21,26,31,20,25,30,19,24,29,128,128,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm10, %ymm6, %ymm6
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
; AVX512-NEXT: vmovdqa %ymm4, %ymm7
-; AVX512-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm7
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm7 = ymm24 ^ (ymm7 & (ymm23 ^ ymm24))
; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,xmm8[4,9,14],zero,zero,zero,xmm8[2,7,12,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u]
; AVX512-NEXT: vpor %xmm7, %xmm8, %xmm12
; AVX512-NEXT: vmovdqa64 {{.*#+}} ymm19 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512-NEXT: vpternlogq $236, %ymm19, %ymm6, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = (ymm12 & ymm19) | ymm6
; AVX512-NEXT: vmovdqa64 192(%rdi), %ymm25
; AVX512-NEXT: vmovdqa 224(%rdi), %ymm7
; AVX512-NEXT: vmovdqa %ymm4, %ymm9
-; AVX512-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm9
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm25 ^ (ymm9 & (ymm7 ^ ymm25))
; AVX512-NEXT: vmovdqa 208(%rdi), %xmm8
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm8 ^ (mem & (ymm9 ^ ymm8))
; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,ymm9[3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vmovdqa 176(%rdi), %xmm9
; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
@@ -6448,7 +6448,7 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm11[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
; AVX512-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
; AVX512-NEXT: vmovdqa64 {{.*#+}} ymm16 = [0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512-NEXT: vpternlogq $186, %ymm13, %ymm16, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = (ymm0 & ~ymm16) | ymm13
; AVX512-NEXT: vmovdqa 144(%rdi), %xmm13
; AVX512-NEXT: vpshufb %xmm10, %xmm13, %xmm10
; AVX512-NEXT: vmovdqa 128(%rdi), %xmm14
@@ -6457,11 +6457,11 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm10
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq $184, %zmm12, %zmm20, %zmm10
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm20 & (zmm10 ^ zmm12))
; AVX512-NEXT: vmovdqa 256(%rdi), %ymm15
; AVX512-NEXT: vmovdqa 288(%rdi), %ymm12
; AVX512-NEXT: vmovdqa %ymm5, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm15, %ymm12, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm15 ^ (ymm2 & (ymm12 ^ ymm15))
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero
@@ -6470,145 +6470,145 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm18
; AVX512-NEXT: vmovdqa %ymm4, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm12, %ymm15, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm15 ^ ymm12))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,0,5,10,15],zero,zero,zero,xmm0[3,8,13],zero,zero,zero
; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa %ymm5, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm2
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm7 ^ ymm25))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm8 ^ (mem & (ymm2 ^ ymm8))
; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,ymm2[4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[1,6,11],zero,zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm9[0,5,10,15,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm3, %xmm10, %xmm3
-; AVX512-NEXT: vpternlogq $186, %ymm2, %ymm16, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ~ymm16) | ymm2
; AVX512-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,16777215,0]
-; AVX512-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm0 ^ (ymm16 & (ymm3 ^ ymm0))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
; AVX512-NEXT: vmovdqa %ymm10, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,2,7,12,17,22,27,16,21,26,31,20,25,30,128,128,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm24 ^ (ymm1 & (ymm23 ^ ymm24))
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero,xmm1[u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13,u,u,u]
; AVX512-NEXT: vpor %xmm6, %xmm1, %xmm1
-; AVX512-NEXT: vpternlogq $236, %ymm19, %ymm2, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = (ymm1 & ymm19) | ymm2
; AVX512-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-NEXT: vpternlogq $184, %zmm1, %zmm20, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm1))
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm17
; AVX512-NEXT: vmovdqa %ymm5, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm12, %ymm15, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm15 ^ ymm12))
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,1,6,11],zero,zero,zero,zero,xmm0[4,9,14],zero,zero,zero
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u],zero,zero,zero,xmm0[0,5,10,15],zero,zero,zero,xmm0[3,8,13]
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa %ymm4, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm7, %ymm25, %ymm1
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm7 ^ (ymm1 & (ymm25 ^ ymm7))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm8 ^ (mem & (ymm1 ^ ymm8))
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm9[1,6,11,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[2,7,12],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm16 & (ymm1 ^ ymm0))
; AVX512-NEXT: vmovdqa %ymm4, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,3,8,13,18,23,28,17,22,27,16,21,26,31,128,128,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512-NEXT: vmovdqa %ymm10, %ymm3
-; AVX512-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm23 ^ (ymm3 & (ymm24 ^ ymm23))
; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[1,6,11],zero,zero,zero,zero,xmm6[4,9,14,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15],zero,zero,zero,xmm3[u,u,u]
; AVX512-NEXT: vpor %xmm6, %xmm3, %xmm3
-; AVX512-NEXT: vpternlogq $236, %ymm19, %ymm2, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ymm19) | ymm2
; AVX512-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-NEXT: vpternlogq $184, %zmm3, %zmm20, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm3))
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm19
; AVX512-NEXT: vmovdqa %ymm10, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm15, %ymm12, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm15 ^ (ymm0 & (ymm12 ^ ymm15))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,2,7,12],zero,zero,zero,xmm0[0,5,10,15],zero,zero,zero
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm7, %ymm25, %ymm1
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm7 ^ (ymm1 & (ymm25 ^ ymm7))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm8 ^ (mem & (ymm1 ^ ymm8))
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm9[2,7,12,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[3,8,13],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm16 & (ymm1 ^ ymm0))
; AVX512-NEXT: vmovdqa %ymm5, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512-NEXT: vmovdqa %ymm4, %ymm3
-; AVX512-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm23 ^ (ymm3 & (ymm24 ^ ymm23))
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11],zero,zero,zero,zero,xmm3[u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15,u,u,u]
; AVX512-NEXT: vpor %xmm6, %xmm3, %xmm3
-; AVX512-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & mem) | ymm2
; AVX512-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vpmovsxwq {{.*#+}} zmm2 = [0,0,0,18446744073709551360,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
-; AVX512-NEXT: vpternlogq $226, %zmm3, %zmm2, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm3 ^ (zmm2 & (zmm0 ^ zmm3))
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogq $226, %ymm15, %ymm4, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm15 ^ (ymm4 & (ymm12 ^ ymm15))
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm12[u,u,u,3,8,13],zero,zero,zero,xmm12[1,6,11],zero,zero,zero,zero
; AVX512-NEXT: vextracti128 $1, %ymm12, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
; AVX512-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm10
-; AVX512-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm10
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm10 = ymm25 ^ (ymm10 & (ymm7 ^ ymm25))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm10 = ymm8 ^ (mem & (ymm10 ^ ymm8))
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm9[3,8,13,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm11[4,9,14],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm6, %xmm7, %xmm6
; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm3[3,4,5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-NEXT: vpternlogq $226, %ymm1, %ymm16, %ymm3
-; AVX512-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm5
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm1 ^ (ymm16 & (ymm3 ^ ymm1))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm5 = ymm23 ^ (ymm5 & (ymm24 ^ ymm23))
; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm1
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[4,9,14],zero,zero,zero,xmm5[2,7,12],zero,zero,zero,xmm5[u,u,u,u]
; AVX512-NEXT: vpor %xmm1, %xmm5, %xmm1
-; AVX512-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm4
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm22 ^ (ymm4 & (ymm21 ^ ymm22))
; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (mem & (ymm5 ^ ymm4))
; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3,4,5,6,7]
; AVX512-NEXT: vmovdqa 128(%rdi), %ymm4
; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm5 = [0,5,0,5,0,5,0,5]
; AVX512-NEXT: vpermd %ymm4, %ymm5, %ymm4
-; AVX512-NEXT: vpternlogq $226, %zmm1, %zmm2, %zmm4
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm1 ^ (zmm2 & (zmm4 ^ zmm1))
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm1
; AVX512-NEXT: vmovdqa64 %zmm18, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm17, (%rdx)
@@ -6626,26 +6626,26 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %ymm21
; AVX512-FCP-NEXT: vmovdqa64 96(%rdi), %ymm22
; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm4
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm22 ^ (ymm4 & (ymm21 ^ ymm22))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm4))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,1,6,11,16,21,26,31,20,25,30,19,24,29,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm6
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm7
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm24 ^ (ymm7 & (ymm23 ^ ymm24))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,xmm8[4,9,14],zero,zero,zero,xmm8[2,7,12,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm8, %xmm12
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} ymm19 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm19, %ymm6, %ymm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = (ymm12 & ymm19) | ymm6
; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm25
; AVX512-FCP-NEXT: vmovdqa 224(%rdi), %ymm7
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm9
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm9
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm25 ^ (ymm9 & (ymm7 ^ ymm25))
; AVX512-FCP-NEXT: vmovdqa 208(%rdi), %xmm8
-; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm8 ^ (mem & (ymm9 ^ ymm8))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,ymm9[3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vmovdqa 176(%rdi), %xmm9
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
@@ -6653,7 +6653,7 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm11[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512-FCP-NEXT: vpternlogq $186, %ymm13, %ymm16, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = (ymm0 & ~ymm16) | ymm13
; AVX512-FCP-NEXT: vmovdqa 144(%rdi), %xmm13
; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm13, %xmm10
; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %xmm14
@@ -6662,11 +6662,11 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm10
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm12, %zmm20, %zmm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm20 & (zmm10 ^ zmm12))
; AVX512-FCP-NEXT: vmovdqa 256(%rdi), %ymm15
; AVX512-FCP-NEXT: vmovdqa 288(%rdi), %ymm12
; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm15, %ymm12, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm15 ^ (ymm2 & (ymm12 ^ ymm15))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero
@@ -6675,145 +6675,145 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm18
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm12, %ymm15, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm15 ^ ymm12))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,0,5,10,15],zero,zero,zero,xmm0[3,8,13],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm7 ^ ymm25))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm8 ^ (mem & (ymm2 ^ ymm8))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,ymm2[4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[1,6,11],zero,zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm9[0,5,10,15,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm10, %xmm3
-; AVX512-FCP-NEXT: vpternlogq $186, %ymm2, %ymm16, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ~ymm16) | ymm2
; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,16777215,0]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm0 ^ (ymm16 & (ymm3 ^ ymm0))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm10, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,2,7,12,17,22,27,16,21,26,31,20,25,30,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm24 ^ (ymm1 & (ymm23 ^ ymm24))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero,xmm1[u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm19, %ymm2, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = (ymm1 & ymm19) | ymm2
; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm20, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm1))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm17
; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm12, %ymm15, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm15 ^ ymm12))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,1,6,11],zero,zero,zero,zero,xmm0[4,9,14],zero,zero,zero
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u],zero,zero,zero,xmm0[0,5,10,15],zero,zero,zero,xmm0[3,8,13]
; AVX512-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm7, %ymm25, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm7 ^ (ymm1 & (ymm25 ^ ymm7))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm8 ^ (mem & (ymm1 ^ ymm8))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm9[1,6,11,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[2,7,12],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm16 & (ymm1 ^ ymm0))
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,3,8,13,18,23,28,17,22,27,16,21,26,31,128,128,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512-FCP-NEXT: vmovdqa %ymm10, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm23 ^ (ymm3 & (ymm24 ^ ymm23))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[1,6,11],zero,zero,zero,zero,xmm6[4,9,14,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15],zero,zero,zero,xmm3[u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm19, %ymm2, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ymm19) | ymm2
; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm3, %zmm20, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm3))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm19
; AVX512-FCP-NEXT: vmovdqa %ymm10, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm15, %ymm12, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm15 ^ (ymm0 & (ymm12 ^ ymm15))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,2,7,12],zero,zero,zero,xmm0[0,5,10,15],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm7, %ymm25, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm7 ^ (ymm1 & (ymm25 ^ ymm7))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm8 ^ (mem & (ymm1 ^ ymm8))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm9[2,7,12,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[3,8,13],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm16 & (ymm1 ^ ymm0))
; AVX512-FCP-NEXT: vmovdqa %ymm5, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512-FCP-NEXT: vmovdqa %ymm4, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm23 ^ (ymm3 & (ymm24 ^ ymm23))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11],zero,zero,zero,zero,xmm3[u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & mem) | ymm2
; AVX512-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpmovsxwq {{.*#+}} zmm2 = [0,0,0,18446744073709551360,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm3, %zmm2, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm3 ^ (zmm2 & (zmm0 ^ zmm3))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm15, %ymm4, %ymm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm15 ^ (ymm4 & (ymm12 ^ ymm15))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm12[u,u,u,3,8,13],zero,zero,zero,xmm12[1,6,11],zero,zero,zero,zero
; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
; AVX512-FCP-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm10
-; AVX512-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm25 ^ (ymm10 & (ymm7 ^ ymm25))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm8 ^ (mem & (ymm10 ^ ymm8))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm9[3,8,13,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm11[4,9,14],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm3[3,4,5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm1, %ymm16, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm1 ^ (ymm16 & (ymm3 ^ ymm1))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm23 ^ (ymm5 & (ymm24 ^ ymm23))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm1
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[4,9,14],zero,zero,zero,xmm5[2,7,12],zero,zero,zero,xmm5[u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm1, %xmm5, %xmm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm22 ^ (ymm4 & (ymm21 ^ ymm22))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (mem & (ymm5 ^ ymm4))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3,4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm4
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm5 = [0,5,0,5,0,5,0,5]
; AVX512-FCP-NEXT: vpermd %ymm4, %ymm5, %ymm4
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm1, %zmm2, %zmm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm1 ^ (zmm2 & (zmm4 ^ zmm1))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm1
; AVX512-FCP-NEXT: vmovdqa64 %zmm18, (%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm17, (%rdx)
@@ -6831,26 +6831,26 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %ymm21
; AVX512DQ-NEXT: vmovdqa64 96(%rdi), %ymm22
; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm4
-; AVX512DQ-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm4 = ymm22 ^ (ymm4 & (ymm21 ^ ymm22))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm4))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,1,6,11,16,21,26,31,20,25,30,19,24,29,128,128,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm10, %ymm6, %ymm6
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm7
-; AVX512DQ-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm7 = ymm24 ^ (ymm7 & (ymm23 ^ ymm24))
; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,xmm8[4,9,14],zero,zero,zero,xmm8[2,7,12,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u]
; AVX512DQ-NEXT: vpor %xmm7, %xmm8, %xmm12
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} ymm19 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $236, %ymm19, %ymm6, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = (ymm12 & ymm19) | ymm6
; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %ymm25
; AVX512DQ-NEXT: vmovdqa 224(%rdi), %ymm7
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm9
-; AVX512DQ-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm25 ^ (ymm9 & (ymm7 ^ ymm25))
; AVX512DQ-NEXT: vmovdqa 208(%rdi), %xmm8
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm8 ^ (mem & (ymm9 ^ ymm8))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,ymm9[3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vmovdqa 176(%rdi), %xmm9
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
@@ -6858,7 +6858,7 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm11[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} ymm16 = [0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $186, %ymm13, %ymm16, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = (ymm0 & ~ymm16) | ymm13
; AVX512DQ-NEXT: vmovdqa 144(%rdi), %xmm13
; AVX512DQ-NEXT: vpshufb %xmm10, %xmm13, %xmm10
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %xmm14
@@ -6867,11 +6867,11 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm10
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm12, %zmm20, %zmm10
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm20 & (zmm10 ^ zmm12))
; AVX512DQ-NEXT: vmovdqa 256(%rdi), %ymm15
; AVX512DQ-NEXT: vmovdqa 288(%rdi), %ymm12
; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm15, %ymm12, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm15 ^ (ymm2 & (ymm12 ^ ymm15))
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero
@@ -6880,145 +6880,145 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm18
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm12, %ymm15, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm15 ^ ymm12))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,0,5,10,15],zero,zero,zero,xmm0[3,8,13],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm2
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm7 ^ ymm25))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm8 ^ (mem & (ymm2 ^ ymm8))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,ymm2[4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[1,6,11],zero,zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm9[0,5,10,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm3, %xmm10, %xmm3
-; AVX512DQ-NEXT: vpternlogq $186, %ymm2, %ymm16, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ~ymm16) | ymm2
; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,16777215,0]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm0 ^ (ymm16 & (ymm3 ^ ymm0))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
; AVX512DQ-NEXT: vmovdqa %ymm10, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,2,7,12,17,22,27,16,21,26,31,20,25,30,128,128,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm24 ^ (ymm1 & (ymm23 ^ ymm24))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero,xmm1[u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13,u,u,u]
; AVX512DQ-NEXT: vpor %xmm6, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpternlogq $236, %ymm19, %ymm2, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = (ymm1 & ymm19) | ymm2
; AVX512DQ-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpternlogq $184, %zmm1, %zmm20, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm1))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm17
; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm12, %ymm15, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm15 ^ ymm12))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,1,6,11],zero,zero,zero,zero,xmm0[4,9,14],zero,zero,zero
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u],zero,zero,zero,xmm0[0,5,10,15],zero,zero,zero,xmm0[3,8,13]
; AVX512DQ-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm7, %ymm25, %ymm1
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm7 ^ (ymm1 & (ymm25 ^ ymm7))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm8 ^ (mem & (ymm1 ^ ymm8))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm9[1,6,11,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[2,7,12],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm16 & (ymm1 ^ ymm0))
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,3,8,13,18,23,28,17,22,27,16,21,26,31,128,128,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512DQ-NEXT: vmovdqa %ymm10, %ymm3
-; AVX512DQ-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm23 ^ (ymm3 & (ymm24 ^ ymm23))
; AVX512DQ-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[1,6,11],zero,zero,zero,zero,xmm6[4,9,14,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15],zero,zero,zero,xmm3[u,u,u]
; AVX512DQ-NEXT: vpor %xmm6, %xmm3, %xmm3
-; AVX512DQ-NEXT: vpternlogq $236, %ymm19, %ymm2, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ymm19) | ymm2
; AVX512DQ-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpternlogq $184, %zmm3, %zmm20, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm3))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm19
; AVX512DQ-NEXT: vmovdqa %ymm10, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm15, %ymm12, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm15 ^ (ymm0 & (ymm12 ^ ymm15))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,2,7,12],zero,zero,zero,xmm0[0,5,10,15],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm7, %ymm25, %ymm1
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm7 ^ (ymm1 & (ymm25 ^ ymm7))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm8 ^ (mem & (ymm1 ^ ymm8))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm9[2,7,12,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[3,8,13],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm16 & (ymm1 ^ ymm0))
; AVX512DQ-NEXT: vmovdqa %ymm5, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm3
-; AVX512DQ-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm23 ^ (ymm3 & (ymm24 ^ ymm23))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11],zero,zero,zero,zero,xmm3[u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15,u,u,u]
; AVX512DQ-NEXT: vpor %xmm6, %xmm3, %xmm3
-; AVX512DQ-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & mem) | ymm2
; AVX512DQ-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovsxwq {{.*#+}} zmm2 = [0,0,0,18446744073709551360,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm3, %zmm2, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm3 ^ (zmm2 & (zmm0 ^ zmm3))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpternlogq $226, %ymm15, %ymm4, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm15 ^ (ymm4 & (ymm12 ^ ymm15))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm12[u,u,u,3,8,13],zero,zero,zero,xmm12[1,6,11],zero,zero,zero,zero
; AVX512DQ-NEXT: vextracti128 $1, %ymm12, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
; AVX512DQ-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm10
-; AVX512DQ-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm10
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm10 = ymm25 ^ (ymm10 & (ymm7 ^ ymm25))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm10 = ymm8 ^ (mem & (ymm10 ^ ymm8))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm9[3,8,13,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm11[4,9,14],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm6, %xmm7, %xmm6
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm3[3,4,5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm1, %ymm16, %ymm3
-; AVX512DQ-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm1 ^ (ymm16 & (ymm3 ^ ymm1))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm5 = ymm23 ^ (ymm5 & (ymm24 ^ ymm23))
; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm1
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[4,9,14],zero,zero,zero,xmm5[2,7,12],zero,zero,zero,xmm5[u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm1, %xmm5, %xmm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm4 = ymm22 ^ (ymm4 & (ymm21 ^ ymm22))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (mem & (ymm5 ^ ymm4))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3,4,5,6,7]
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm4
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm5 = [0,5,0,5,0,5,0,5]
; AVX512DQ-NEXT: vpermd %ymm4, %ymm5, %ymm4
-; AVX512DQ-NEXT: vpternlogq $226, %zmm1, %zmm2, %zmm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm1 ^ (zmm2 & (zmm4 ^ zmm1))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm1
; AVX512DQ-NEXT: vmovdqa64 %zmm18, (%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm17, (%rdx)
@@ -7036,26 +7036,26 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %ymm21
; AVX512DQ-FCP-NEXT: vmovdqa64 96(%rdi), %ymm22
; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm4
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm22 ^ (ymm4 & (ymm21 ^ ymm22))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm4))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,1,6,11,16,21,26,31,20,25,30,19,24,29,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm6, %ymm6
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm24 ^ (ymm7 & (ymm23 ^ ymm24))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,xmm8[4,9,14],zero,zero,zero,xmm8[2,7,12,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm8, %xmm12
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} ymm19 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm19, %ymm6, %ymm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = (ymm12 & ymm19) | ymm6
; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm25
; AVX512DQ-FCP-NEXT: vmovdqa 224(%rdi), %ymm7
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm9
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm25 ^ (ymm9 & (ymm7 ^ ymm25))
; AVX512DQ-FCP-NEXT: vmovdqa 208(%rdi), %xmm8
-; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm8 ^ (mem & (ymm9 ^ ymm8))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,ymm9[3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vmovdqa 176(%rdi), %xmm9
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
@@ -7063,7 +7063,7 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm11[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} ymm16 = [0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $186, %ymm13, %ymm16, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = (ymm0 & ~ymm16) | ymm13
; AVX512DQ-FCP-NEXT: vmovdqa 144(%rdi), %xmm13
; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm13, %xmm10
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %xmm14
@@ -7072,11 +7072,11 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm10, %ymm0, %ymm10
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm10
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm12, %zmm20, %zmm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (zmm20 & (zmm10 ^ zmm12))
; AVX512DQ-FCP-NEXT: vmovdqa 256(%rdi), %ymm15
; AVX512DQ-FCP-NEXT: vmovdqa 288(%rdi), %ymm12
; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm15, %ymm12, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm15 ^ (ymm2 & (ymm12 ^ ymm15))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero
@@ -7085,145 +7085,145 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm10, %zmm18
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm12, %ymm15, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm15 ^ ymm12))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,0,5,10,15],zero,zero,zero,xmm0[3,8,13],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm7 ^ ymm25))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm8 ^ (mem & (ymm2 ^ ymm8))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,ymm2[4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[1,6,11],zero,zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm9[0,5,10,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm10, %xmm3
-; AVX512DQ-FCP-NEXT: vpternlogq $186, %ymm2, %ymm16, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ~ymm16) | ymm2
; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,16777215,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm0 ^ (ymm16 & (ymm3 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm10, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,2,7,12,17,22,27,16,21,26,31,20,25,30,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm23, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm24 ^ (ymm1 & (ymm23 ^ ymm24))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero,xmm1[u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm19, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = (ymm1 & ymm19) | ymm2
; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm20, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm1))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm12, %ymm15, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm15 ^ ymm12))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,1,6,11],zero,zero,zero,zero,xmm0[4,9,14],zero,zero,zero
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u],zero,zero,zero,xmm0[0,5,10,15],zero,zero,zero,xmm0[3,8,13]
; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm7, %ymm25, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm7 ^ (ymm1 & (ymm25 ^ ymm7))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm8 ^ (mem & (ymm1 ^ ymm8))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm9[1,6,11,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[2,7,12],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm16 & (ymm1 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,3,8,13,18,23,28,17,22,27,16,21,26,31,128,128,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm10, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm23 ^ (ymm3 & (ymm24 ^ ymm23))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[1,6,11],zero,zero,zero,zero,xmm6[4,9,14,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15],zero,zero,zero,xmm3[u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm19, %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ymm19) | ymm2
; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm3, %zmm20, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm20 & (zmm0 ^ zmm3))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm19
; AVX512DQ-FCP-NEXT: vmovdqa %ymm10, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm15, %ymm12, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm15 ^ (ymm0 & (ymm12 ^ ymm15))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,2,7,12],zero,zero,zero,xmm0[0,5,10,15],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm7, %ymm25, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm7 ^ (ymm1 & (ymm25 ^ ymm7))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm8 ^ (mem & (ymm1 ^ ymm8))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm9[2,7,12,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[3,8,13],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm0, %ymm16, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm16 & (ymm1 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm5, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm22 ^ ymm21))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb %ymm0, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm4, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm23 ^ (ymm3 & (ymm24 ^ ymm23))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11],zero,zero,zero,zero,xmm3[u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & mem) | ymm2
; AVX512DQ-FCP-NEXT: vpshufb %xmm0, %xmm13, %xmm0
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm14[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vpmovsxwq {{.*#+}} zmm2 = [0,0,0,18446744073709551360,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm3, %zmm2, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm3 ^ (zmm2 & (zmm0 ^ zmm3))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm15, %ymm4, %ymm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm15 ^ (ymm4 & (ymm12 ^ ymm15))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm12[u,u,u,3,8,13],zero,zero,zero,xmm12[1,6,11],zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm25, %ymm7, %ymm10
-; AVX512DQ-FCP-NEXT: vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm25 ^ (ymm10 & (ymm7 ^ ymm25))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm8 ^ (mem & (ymm10 ^ ymm8))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm10[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm9[3,8,13,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm11[4,9,14],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm7, %xmm6
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm3[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm1, %ymm16, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm23, %ymm24, %ymm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm1 ^ (ymm16 & (ymm3 ^ ymm1))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm23 ^ (ymm5 & (ymm24 ^ ymm23))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[4,9,14],zero,zero,zero,xmm5[2,7,12],zero,zero,zero,xmm5[u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm1, %xmm5, %xmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm22 ^ (ymm4 & (ymm21 ^ ymm22))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (mem & (ymm5 ^ ymm4))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm4
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm5 = [0,5,0,5,0,5,0,5]
; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm5, %ymm4
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm1, %zmm2, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm1 ^ (zmm2 & (zmm4 ^ zmm1))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm4, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm18, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, (%rdx)
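
As an aside for readers of these checks: each vpternlog immediate is an 8-bit truth table over the destination register and the two sources, and the regenerated comments simply print that table as a boolean expression. The sketch below is illustrative only (it is not the code LLVM's instruction printer uses); it assumes the Intel-defined bit indexing where the destination operand supplies bit 2 of the index and the two sources supply bits 1 and 0, and it checks the two immediates that appear most often above, 202 and 216, against the expressions shown in the new comments.

# Illustrative sketch, not LLVM's implementation: verify that a vpternlog
# immediate matches a candidate boolean expression.
def ternlog_result(imm, a, b, c):
    # Bit (a<<2 | b<<1 | c) of the 8-bit immediate is the result for that
    # combination of input bits; a is the destination, b and c the sources.
    return (imm >> ((a << 2) | (b << 1) | c)) & 1

def matches(imm, expr):
    # Compare the immediate's truth table against a candidate expression
    # over all eight input combinations.
    return all(
        ternlog_result(imm, a, b, c) == expr(a, b, c)
        for a in (0, 1) for b in (0, 1) for c in (0, 1)
    )

# imm 202 (0xCA) selects B where A is set and C elsewhere, i.e. the
# "C ^ (A & (B ^ C))" form printed in the regenerated checks.
assert matches(202, lambda a, b, c: c ^ (a & (b ^ c)))

# imm 216 (0xD8), used with the constant-pool operands above, is the
# "A ^ (C & (A ^ B))" form, a select controlled by the memory operand.
assert matches(216, lambda a, b, c: a ^ (c & (a ^ b)))

With that reading, a line such as "vpternlogq $202, %ymm22, %ymm21, %ymm4" and its new comment "ymm4 = ymm22 ^ (ymm4 & (ymm21 ^ ymm22))" describe the same operation; the AT&T operand order is reversed, so ymm4 is the destination A and ymm21/ymm22 are the sources B/C.
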
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
index 1391e8e86869a7..aa9a9f20645e27 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
@@ -1397,7 +1397,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm1 ^ (ymm2 & (ymm0 ^ ymm1))
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,7,14],zero,zero,xmm2[3,10],zero,xmm2[u,u,u,u,u,u,u,u]
@@ -1408,7 +1408,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,8,15],zero,zero,xmm3[4,11],zero,xmm3[u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[2,9],zero,zero,zero,xmm4[5,12],zero,xmm4[u,u,u,u,u,u,u,u]
@@ -1419,7 +1419,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[3,10],zero,zero,zero,xmm5[6,13],zero,xmm5[u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
-; AVX512-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm6
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm0 ^ (ymm6 & (ymm1 ^ ymm0))
; AVX512-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[4,11],zero,zero,xmm6[0,7,14],zero,xmm6[u,u,u,u,u,u,u,u]
@@ -1429,7 +1429,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[3,10],zero,zero,zero,xmm8[6,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[5,12],zero,zero,xmm7[1,8,15],zero,xmm7[u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,11,0,7,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,13,2,9,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -1451,7 +1451,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm1 ^ (ymm2 & (ymm0 ^ ymm1))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,7,14],zero,zero,xmm2[3,10],zero,xmm2[u,u,u,u,u,u,u,u]
@@ -1462,7 +1462,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,8,15],zero,zero,xmm3[4,11],zero,xmm3[u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[2,9],zero,zero,zero,xmm4[5,12],zero,xmm4[u,u,u,u,u,u,u,u]
@@ -1473,7 +1473,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[3,10],zero,zero,zero,xmm5[6,13],zero,xmm5[u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm0 ^ (ymm6 & (ymm1 ^ ymm0))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[4,11],zero,zero,xmm6[0,7,14],zero,xmm6[u,u,u,u,u,u,u,u]
@@ -1483,7 +1483,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[3,10],zero,zero,zero,xmm8[6,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[5,12],zero,zero,xmm7[1,8,15],zero,xmm7[u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,11,0,7,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,13,2,9,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -1505,7 +1505,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm1 ^ (ymm2 & (ymm0 ^ ymm1))
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,7,14],zero,zero,xmm2[3,10],zero,xmm2[u,u,u,u,u,u,u,u]
@@ -1516,7 +1516,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,8,15],zero,zero,xmm3[4,11],zero,xmm3[u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[2,9],zero,zero,zero,xmm4[5,12],zero,xmm4[u,u,u,u,u,u,u,u]
@@ -1527,7 +1527,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[3,10],zero,zero,zero,xmm5[6,13],zero,xmm5[u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm0 ^ (ymm6 & (ymm1 ^ ymm0))
; AVX512DQ-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[4,11],zero,zero,xmm6[0,7,14],zero,xmm6[u,u,u,u,u,u,u,u]
@@ -1537,7 +1537,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[3,10],zero,zero,zero,xmm8[6,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[5,12],zero,zero,xmm7[1,8,15],zero,xmm7[u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,11,0,7,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,13,2,9,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -1559,7 +1559,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm1 ^ (ymm2 & (ymm0 ^ ymm1))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,7,14],zero,zero,xmm2[3,10],zero,xmm2[u,u,u,u,u,u,u,u]
@@ -1570,7 +1570,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,8,15],zero,zero,xmm3[4,11],zero,xmm3[u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[2,9],zero,zero,zero,xmm4[5,12],zero,xmm4[u,u,u,u,u,u,u,u]
@@ -1581,7 +1581,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[3,10],zero,zero,zero,xmm5[6,13],zero,xmm5[u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm0, %ymm1, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm0 ^ (ymm6 & (ymm1 ^ ymm0))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[4,11],zero,zero,xmm6[0,7,14],zero,xmm6[u,u,u,u,u,u,u,u]
@@ -1591,7 +1591,7 @@ define void @load_i8_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[3,10],zero,zero,zero,xmm8[6,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[5,12],zero,zero,xmm7[1,8,15],zero,xmm7[u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,11,0,7,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,13,2,9,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -2815,7 +2815,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa (%rdi), %ymm1
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
-; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm5
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm5 = ymm2 ^ (ymm5 & (ymm1 ^ ymm2))
; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
@@ -2829,7 +2829,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512-NEXT: vmovdqa %ymm8, %ymm6
-; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm1 ^ ymm2))
; AVX512-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
@@ -2839,10 +2839,10 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512-NEXT: vpor %xmm7, %xmm6, %xmm6
; AVX512-NEXT: vpmovsxwq {{.*#+}} xmm7 = [18446744073709551615,255]
-; AVX512-NEXT: vpternlogq $184, %xmm9, %xmm7, %xmm6
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm6 = xmm6 ^ (xmm7 & (xmm6 ^ xmm9))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
; AVX512-NEXT: vmovdqa %ymm9, %ymm10
-; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm10
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm10 = ymm2 ^ (ymm10 & (ymm1 ^ ymm2))
; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[2,9],zero,zero,zero,xmm10[5,12],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm10, %xmm10
; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[0,7,14],zero,zero,xmm10[3,10,u,u,u,u,u,u,u]
@@ -2851,10 +2851,10 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm11[4,11]
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512-NEXT: vpor %xmm12, %xmm11, %xmm11
-; AVX512-NEXT: vpternlogq $184, %xmm10, %xmm7, %xmm11
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm11 = xmm11 ^ (xmm7 & (xmm11 ^ xmm10))
; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-NEXT: vmovdqa %ymm10, %ymm12
-; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm2 ^ (ymm12 & (ymm1 ^ ymm2))
; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u,u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm12, %xmm12
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u,u,u,u,u,u]
@@ -2863,8 +2863,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm13[5,12]
; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero
; AVX512-NEXT: vpor %xmm15, %xmm14, %xmm14
-; AVX512-NEXT: vpternlogq $184, %xmm12, %xmm7, %xmm14
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm14 = xmm14 ^ (xmm7 & (xmm14 ^ xmm12))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm1 ^ (ymm8 & (ymm2 ^ ymm1))
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm12
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[2,9],zero,zero,zero,xmm12[5,12,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u,u,u]
@@ -2872,8 +2872,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm13[6,13]
; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero
; AVX512-NEXT: vpor %xmm13, %xmm12, %xmm12
-; AVX512-NEXT: vpternlogq $184, %xmm8, %xmm7, %xmm12
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm9
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm12 = xmm12 ^ (xmm7 & (xmm12 ^ xmm8))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm1 ^ (ymm9 & (ymm2 ^ ymm1))
; AVX512-NEXT: vextracti128 $1, %ymm9, %xmm8
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[3,10],zero,zero,zero,xmm8[6,13,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
@@ -2882,8 +2882,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm9[0,7,14]
; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512-NEXT: vpor %xmm13, %xmm9, %xmm9
-; AVX512-NEXT: vpternlogq $184, %xmm8, %xmm7, %xmm9
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm10
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm9 = xmm9 ^ (xmm7 & (xmm9 ^ xmm8))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm10 = ymm1 ^ (ymm10 & (ymm2 ^ ymm1))
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm10[6,13],zero,zero,xmm10[2,9],zero,zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm10, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
@@ -2892,7 +2892,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[3,10],zero,zero,zero
; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX512-NEXT: vpternlogq $184, %xmm1, %xmm7, %xmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (xmm7 & (xmm0 ^ xmm1))
; AVX512-NEXT: vmovdqa %xmm5, (%rsi)
; AVX512-NEXT: vmovdqa %xmm6, (%rdx)
; AVX512-NEXT: vmovdqa %xmm11, (%rcx)
@@ -2913,7 +2913,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm2 ^ (ymm5 & (ymm1 ^ ymm2))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
@@ -2927,7 +2927,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm8, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm1 ^ ymm2))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
@@ -2937,10 +2937,10 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512-FCP-NEXT: vpor %xmm7, %xmm6, %xmm6
; AVX512-FCP-NEXT: vpmovsxwq {{.*#+}} xmm7 = [18446744073709551615,255]
-; AVX512-FCP-NEXT: vpternlogq $184, %xmm9, %xmm7, %xmm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm6 = xmm6 ^ (xmm7 & (xmm6 ^ xmm9))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm10
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm2 ^ (ymm10 & (ymm1 ^ ymm2))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[2,9],zero,zero,zero,xmm10[5,12],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm10
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[0,7,14],zero,zero,xmm10[3,10,u,u,u,u,u,u,u]
@@ -2949,10 +2949,10 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm11[4,11]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512-FCP-NEXT: vpor %xmm12, %xmm11, %xmm11
-; AVX512-FCP-NEXT: vpternlogq $184, %xmm10, %xmm7, %xmm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm11 = xmm11 ^ (xmm7 & (xmm11 ^ xmm10))
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm10, %ymm12
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm2 ^ (ymm12 & (ymm1 ^ ymm2))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm12, %xmm12
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u,u,u,u,u,u]
@@ -2961,8 +2961,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm13[5,12]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero
; AVX512-FCP-NEXT: vpor %xmm15, %xmm14, %xmm14
-; AVX512-FCP-NEXT: vpternlogq $184, %xmm12, %xmm7, %xmm14
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm14 = xmm14 ^ (xmm7 & (xmm14 ^ xmm12))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm1 ^ (ymm8 & (ymm2 ^ ymm1))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm12
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[2,9],zero,zero,zero,xmm12[5,12,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u,u,u]
@@ -2970,8 +2970,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm13[6,13]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero
; AVX512-FCP-NEXT: vpor %xmm13, %xmm12, %xmm12
-; AVX512-FCP-NEXT: vpternlogq $184, %xmm8, %xmm7, %xmm12
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm9
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm12 = xmm12 ^ (xmm7 & (xmm12 ^ xmm8))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm1 ^ (ymm9 & (ymm2 ^ ymm1))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm8
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[3,10],zero,zero,zero,xmm8[6,13,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
@@ -2980,8 +2980,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm9[0,7,14]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm13, %xmm9, %xmm9
-; AVX512-FCP-NEXT: vpternlogq $184, %xmm8, %xmm7, %xmm9
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm9 = xmm9 ^ (xmm7 & (xmm9 ^ xmm8))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm1 ^ (ymm10 & (ymm2 ^ ymm1))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm10[6,13],zero,zero,xmm10[2,9],zero,zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
@@ -2990,7 +2990,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[3,10],zero,zero,zero
; AVX512-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX512-FCP-NEXT: vpternlogq $184, %xmm1, %xmm7, %xmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (xmm7 & (xmm0 ^ xmm1))
; AVX512-FCP-NEXT: vmovdqa %xmm5, (%rsi)
; AVX512-FCP-NEXT: vmovdqa %xmm6, (%rdx)
; AVX512-FCP-NEXT: vmovdqa %xmm11, (%rcx)
@@ -3011,7 +3011,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm5 = ymm2 ^ (ymm5 & (ymm1 ^ ymm2))
; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512DQ-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
@@ -3025,7 +3025,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512DQ-NEXT: vmovdqa %ymm8, %ymm6
-; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm1 ^ ymm2))
; AVX512DQ-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
@@ -3035,10 +3035,10 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512DQ-NEXT: vpor %xmm7, %xmm6, %xmm6
; AVX512DQ-NEXT: vpmovsxwq {{.*#+}} xmm7 = [18446744073709551615,255]
-; AVX512DQ-NEXT: vpternlogq $184, %xmm9, %xmm7, %xmm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm6 = xmm6 ^ (xmm7 & (xmm6 ^ xmm9))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm10
-; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm10
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm10 = ymm2 ^ (ymm10 & (ymm1 ^ ymm2))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[2,9],zero,zero,zero,xmm10[5,12],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm10, %xmm10
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[0,7,14],zero,zero,xmm10[3,10,u,u,u,u,u,u,u]
@@ -3047,10 +3047,10 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm11[4,11]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512DQ-NEXT: vpor %xmm12, %xmm11, %xmm11
-; AVX512DQ-NEXT: vpternlogq $184, %xmm10, %xmm7, %xmm11
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm11 = xmm11 ^ (xmm7 & (xmm11 ^ xmm10))
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vmovdqa %ymm10, %ymm12
-; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm2 ^ (ymm12 & (ymm1 ^ ymm2))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm12, %xmm12
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u,u,u,u,u,u]
@@ -3059,8 +3059,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm13[5,12]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero
; AVX512DQ-NEXT: vpor %xmm15, %xmm14, %xmm14
-; AVX512DQ-NEXT: vpternlogq $184, %xmm12, %xmm7, %xmm14
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm14 = xmm14 ^ (xmm7 & (xmm14 ^ xmm12))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm1 ^ (ymm8 & (ymm2 ^ ymm1))
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm12
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[2,9],zero,zero,zero,xmm12[5,12,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u,u,u]
@@ -3068,8 +3068,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm13[6,13]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero
; AVX512DQ-NEXT: vpor %xmm13, %xmm12, %xmm12
-; AVX512DQ-NEXT: vpternlogq $184, %xmm8, %xmm7, %xmm12
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm12 = xmm12 ^ (xmm7 & (xmm12 ^ xmm8))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm1 ^ (ymm9 & (ymm2 ^ ymm1))
; AVX512DQ-NEXT: vextracti128 $1, %ymm9, %xmm8
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[3,10],zero,zero,zero,xmm8[6,13,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
@@ -3078,8 +3078,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm9[0,7,14]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm13, %xmm9, %xmm9
-; AVX512DQ-NEXT: vpternlogq $184, %xmm8, %xmm7, %xmm9
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm10
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm9 = xmm9 ^ (xmm7 & (xmm9 ^ xmm8))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm10 = ymm1 ^ (ymm10 & (ymm2 ^ ymm1))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm10[6,13],zero,zero,xmm10[2,9],zero,zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm10, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
@@ -3088,7 +3088,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[3,10],zero,zero,zero
; AVX512DQ-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX512DQ-NEXT: vpternlogq $184, %xmm1, %xmm7, %xmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (xmm7 & (xmm0 ^ xmm1))
; AVX512DQ-NEXT: vmovdqa %xmm5, (%rsi)
; AVX512DQ-NEXT: vmovdqa %xmm6, (%rdx)
; AVX512DQ-NEXT: vmovdqa %xmm11, (%rcx)
@@ -3109,7 +3109,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm2 ^ (ymm5 & (ymm1 ^ ymm2))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0,1,2,3,4],xmm4[5,6,7]
@@ -3123,7 +3123,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm5, %xmm5
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm8, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm1 ^ ymm2))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
@@ -3133,10 +3133,10 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm6, %xmm6
; AVX512DQ-FCP-NEXT: vpmovsxwq {{.*#+}} xmm7 = [18446744073709551615,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %xmm9, %xmm7, %xmm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm6 = xmm6 ^ (xmm7 & (xmm6 ^ xmm9))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm10
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm2 ^ (ymm10 & (ymm1 ^ ymm2))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[2,9],zero,zero,zero,xmm10[5,12],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm10
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[0,7,14],zero,zero,xmm10[3,10,u,u,u,u,u,u,u]
@@ -3145,10 +3145,10 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm11[4,11]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm12, %xmm11, %xmm11
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %xmm10, %xmm7, %xmm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm11 = xmm11 ^ (xmm7 & (xmm11 ^ xmm10))
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm10, %ymm12
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm1, %ymm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm2 ^ (ymm12 & (ymm1 ^ ymm2))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm12, %xmm12
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u,u,u,u,u,u]
@@ -3157,8 +3157,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm13[5,12]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm15, %xmm14, %xmm14
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %xmm12, %xmm7, %xmm14
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm14 = xmm14 ^ (xmm7 & (xmm14 ^ xmm12))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm1 ^ (ymm8 & (ymm2 ^ ymm1))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm12
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[2,9],zero,zero,zero,xmm12[5,12,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u,u,u]
@@ -3166,8 +3166,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm13[6,13]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm13, %xmm12, %xmm12
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %xmm8, %xmm7, %xmm12
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm12 = xmm12 ^ (xmm7 & (xmm12 ^ xmm8))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm1 ^ (ymm9 & (ymm2 ^ ymm1))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm9, %xmm8
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[3,10],zero,zero,zero,xmm8[6,13,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
@@ -3176,8 +3176,8 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm9[0,7,14]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm13, %xmm9, %xmm9
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %xmm8, %xmm7, %xmm9
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm2, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm9 = xmm9 ^ (xmm7 & (xmm9 ^ xmm8))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm1 ^ (ymm10 & (ymm2 ^ ymm1))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm10[6,13],zero,zero,xmm10[2,9],zero,zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
@@ -3186,7 +3186,7 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[3,10],zero,zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %xmm1, %xmm7, %xmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm0 = xmm0 ^ (xmm7 & (xmm0 ^ xmm1))
; AVX512DQ-FCP-NEXT: vmovdqa %xmm5, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm6, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa %xmm11, (%rcx)
@@ -5646,7 +5646,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa 128(%rdi), %ymm2
; AVX512-NEXT: vmovdqa 160(%rdi), %ymm3
; AVX512-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm2 ^ (ymm1 & (ymm3 ^ ymm2))
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
@@ -5666,7 +5666,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm7
; AVX512-NEXT: vmovdqa 64(%rdi), %ymm1
; AVX512-NEXT: vmovdqa %ymm14, %ymm9
-; AVX512-NEXT: vpternlogq $202, %ymm7, %ymm6, %ymm9
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm7 ^ (ymm9 & (ymm6 ^ ymm7))
; AVX512-NEXT: vextracti128 $1, %ymm9, %xmm10
; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,12],zero,zero,xmm10[1,8,15,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,7,14],zero,zero,xmm9[3,10],zero,zero,zero,xmm9[u,u,u,u,u,u]
@@ -5674,16 +5674,16 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-NEXT: vmovdqa 96(%rdi), %ymm9
; AVX512-NEXT: vmovdqa %ymm11, %ymm15
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm9, %ymm15
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm15 = ymm1 ^ (ymm15 & (ymm9 ^ ymm1))
; AVX512-NEXT: vmovdqa 80(%rdi), %xmm10
; AVX512-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0,1],ymm10[2],ymm15[3,4],ymm10[5],ymm15[6,7,8,9],ymm10[10],ymm15[11,12],ymm10[13],ymm15[14,15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm8 | (ymm13 & mem)
; AVX512-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,16777215,0]
-; AVX512-NEXT: vpternlogq $226, %ymm12, %ymm16, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm12 ^ (ymm16 & (ymm8 ^ ymm12))
; AVX512-NEXT: vmovdqa64 %ymm8, %ymm18
; AVX512-NEXT: vmovdqa %ymm11, %ymm12
-; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm2 ^ (ymm12 & (ymm3 ^ ymm2))
; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,6,13],zero,zero,xmm12[2,9],zero,zero,zero,xmm12[u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm12, %xmm12
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[u,u,u],zero,zero,xmm12[4,11],zero,zero,xmm12[0,7,14,u,u,u,u]
@@ -5696,31 +5696,31 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3,4,5,6],ymm8[7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512-NEXT: vmovdqa %ymm13, %ymm12
-; AVX512-NEXT: vpternlogq $202, %ymm7, %ymm6, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm7 ^ (ymm12 & (ymm6 ^ ymm7))
; AVX512-NEXT: vextracti128 $1, %ymm12, %xmm15
; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[6,13],zero,zero,xmm15[2,9,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[1,8,15],zero,zero,xmm12[4,11],zero,zero,xmm12[u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm15, %xmm12, %xmm15
; AVX512-NEXT: vmovdqa %ymm14, %ymm12
-; AVX512-NEXT: vpternlogq $202, %ymm9, %ymm1, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm9 ^ (ymm12 & (ymm1 ^ ymm9))
; AVX512-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm10[2],ymm12[3,4,5],ymm10[6],ymm12[7,8,9],ymm10[10],ymm12[11,12,13],ymm10[14],ymm12[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm12
-; AVX512-NEXT: vpternlogq $226, %ymm8, %ymm16, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 | (ymm15 & ~mem)
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm8 ^ (ymm16 & (ymm12 ^ ymm8))
; AVX512-NEXT: vmovdqa64 %ymm12, %ymm19
; AVX512-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512-NEXT: vpternlogq $202, %ymm7, %ymm6, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm7 ^ (ymm8 & (ymm6 ^ ymm7))
; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm8[2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[0,7,14],zero,zero,xmm8[3,10,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm15, %xmm8, %xmm8
; AVX512-NEXT: vmovdqa %ymm13, %ymm15
-; AVX512-NEXT: vpternlogq $202, %ymm9, %ymm1, %ymm15
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm15 = ymm9 ^ (ymm15 & (ymm1 ^ ymm9))
; AVX512-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0,1,2],ymm10[3],ymm15[4,5],ymm10[6],ymm15[7,8,9,10],ymm10[11],ymm15[12,13],ymm10[14],ymm15[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpmovsxdq {{.*#+}} ymm17 = [18446744073709551615,255,18446744073709486080,18446744073709551615]
-; AVX512-NEXT: vpternlogq $248, %ymm17, %ymm8, %ymm15
-; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm14
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm8 & ymm17)
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm14 = ymm3 ^ (ymm14 & (ymm2 ^ ymm3))
; AVX512-NEXT: vextracti128 $1, %ymm14, %xmm8
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[1,8,15,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[u,u,0,7,14],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[u,u,u,u]
@@ -5736,7 +5736,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
; AVX512-NEXT: vmovdqa64 %ymm8, %ymm20
; AVX512-NEXT: vmovdqa %ymm13, %ymm8
-; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm3 ^ (ymm8 & (ymm2 ^ ymm3))
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm12
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[u,u],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[2,9,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,1,8,15],zero,zero,xmm8[4,11],zero,zero,xmm8[u,u,u,u,u]
@@ -5747,23 +5747,23 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpor %xmm12, %xmm14, %xmm12
; AVX512-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
; AVX512-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,18446744073709551615,16777215]
-; AVX512-NEXT: vpternlogq $184, %ymm8, %ymm16, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm8))
; AVX512-NEXT: vmovdqa %ymm11, %ymm8
-; AVX512-NEXT: vpternlogq $202, %ymm7, %ymm6, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm7 ^ (ymm8 & (ymm6 ^ ymm7))
; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm8[3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm14, %xmm8, %xmm8
; AVX512-NEXT: vmovdqa %ymm0, %ymm14
-; AVX512-NEXT: vpternlogq $202, %ymm9, %ymm1, %ymm14
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm14 = ymm9 ^ (ymm14 & (ymm1 ^ ymm9))
; AVX512-NEXT: vpblendw {{.*#+}} ymm14 = ymm10[0],ymm14[1,2],ymm10[3],ymm14[4,5,6],ymm10[7,8],ymm14[9,10],ymm10[11],ymm14[12,13,14],ymm10[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpternlogq $248, %ymm17, %ymm8, %ymm14
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm14 = ymm14 | (ymm8 & ymm17)
; AVX512-NEXT: vpblendw {{.*#+}} ymm8 = ymm14[0],ymm12[1,2,3,4,5,6,7],ymm14[8],ymm12[9,10,11,12,13,14,15]
; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
; AVX512-NEXT: vmovdqa64 %ymm8, %ymm21
; AVX512-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm3 ^ (ymm8 & (ymm2 ^ ymm3))
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,xmm8[0,7,14],zero,zero,xmm8[3,10,u,u,u,u,u]
@@ -5773,22 +5773,22 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
; AVX512-NEXT: vpor %xmm12, %xmm14, %xmm12
; AVX512-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512-NEXT: vpternlogq $184, %ymm8, %ymm16, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm8))
; AVX512-NEXT: vmovdqa %ymm13, %ymm8
-; AVX512-NEXT: vpternlogq $202, %ymm6, %ymm7, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm6 ^ (ymm8 & (ymm7 ^ ymm6))
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm14
; AVX512-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,xmm14[2,9],zero,zero,zero,xmm14[5,12,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm14, %xmm8, %xmm8
; AVX512-NEXT: vmovdqa %ymm11, %ymm14
-; AVX512-NEXT: vpternlogq $202, %ymm9, %ymm1, %ymm14
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm14 = ymm9 ^ (ymm14 & (ymm1 ^ ymm9))
; AVX512-NEXT: vpblendw {{.*#+}} ymm14 = ymm10[0],ymm14[1,2,3],ymm10[4],ymm14[5,6],ymm10[7,8],ymm14[9,10,11],ymm10[12],ymm14[13,14],ymm10[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpternlogq $248, %ymm17, %ymm8, %ymm14
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm14 = ymm14 | (ymm8 & ymm17)
; AVX512-NEXT: vpblendw {{.*#+}} ymm8 = ymm14[0],ymm12[1,2,3,4,5,6,7],ymm14[8],ymm12[9,10,11,12,13,14,15]
; AVX512-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm8[4,5,6,7]
; AVX512-NEXT: vmovdqa %ymm11, %ymm8
-; AVX512-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm3 ^ (ymm8 & (ymm2 ^ ymm3))
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u]
@@ -5798,21 +5798,21 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14]
; AVX512-NEXT: vpor %xmm12, %xmm15, %xmm12
; AVX512-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512-NEXT: vpternlogq $184, %ymm8, %ymm16, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm8))
; AVX512-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512-NEXT: vpternlogq $202, %ymm6, %ymm7, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm6 ^ (ymm8 & (ymm7 ^ ymm6))
; AVX512-NEXT: vextracti128 $1, %ymm8, %xmm15
; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[3,10],zero,zero,zero,xmm15[6,13,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[5,12],zero,zero,xmm8[1,8,15],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm15, %xmm8, %xmm8
; AVX512-NEXT: vmovdqa %ymm13, %ymm15
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm9, %ymm15
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm15 = ymm1 ^ (ymm15 & (ymm9 ^ ymm1))
; AVX512-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0],ymm10[1],ymm15[2,3],ymm10[4],ymm15[5,6,7,8],ymm10[9],ymm15[10,11],ymm10[12],ymm15[13,14,15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpternlogq $248, %ymm17, %ymm8, %ymm15
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm8 & ymm17)
; AVX512-NEXT: vpblendw {{.*#+}} ymm8 = ymm15[0],ymm12[1,2,3,4,5,6,7],ymm15[8],ymm12[9,10,11,12,13,14,15]
; AVX512-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
-; AVX512-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm13
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm13 = ymm2 ^ (ymm13 & (ymm3 ^ ymm2))
; AVX512-NEXT: vextracti128 $1, %ymm13, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm13[u,u,4,11],zero,zero,xmm13[0,7,14],zero,zero,xmm13[u,u,u,u,u]
@@ -5822,16 +5822,16 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15]
; AVX512-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-NEXT: vpternlogq $184, %ymm2, %ymm16, %ymm3
-; AVX512-NEXT: vpternlogq $202, %ymm6, %ymm7, %ymm11
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm16 & (ymm3 ^ ymm2))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm11 = ymm6 ^ (ymm11 & (ymm7 ^ ymm6))
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm11[6,13],zero,zero,xmm11[2,9],zero,zero,zero,xmm11[u,u,u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm11, %xmm4
; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[4,11],zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm9, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm1 ^ (ymm0 & (ymm9 ^ ymm1))
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm10[1],ymm0[2,3,4],ymm10[5],ymm0[6,7,8],ymm10[9],ymm0[10,11,12],ymm10[13],ymm0[14,15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm2 & ymm17)
; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512-NEXT: vmovdqa64 %ymm18, (%rsi)
@@ -5852,7 +5852,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm3
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm2 ^ (ymm1 & (ymm3 ^ ymm2))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
@@ -5868,7 +5868,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqa %ymm13, %ymm7
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm4 ^ ymm5))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[5,12],zero,zero,xmm8[1,8,15,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,7,14],zero,zero,xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u]
@@ -5876,16 +5876,16 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %ymm7
; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm11
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm7, %ymm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm1 ^ (ymm11 & (ymm7 ^ ymm1))
; AVX512-FCP-NEXT: vmovdqa 80(%rdi), %xmm8
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm8[2],ymm11[3,4],ymm8[5],ymm11[6,7,8,9],ymm8[10],ymm11[11,12],ymm8[13],ymm11[14,15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10, %ymm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 | (ymm10 & mem)
; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,16777215,0]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm6, %ymm16, %ymm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm6 ^ (ymm16 & (ymm11 ^ ymm6))
; AVX512-FCP-NEXT: vmovdqa64 %ymm11, %ymm18
; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm3 ^ ymm2))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm6[u,u,u,6,13],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,u],zero,zero,xmm6[4,11],zero,zero,xmm6[0,7,14,u,u,u,u]
@@ -5897,30 +5897,30 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm10[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm11, %ymm10
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm5 ^ (ymm10 & (ymm4 ^ ymm5))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm14
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[6,13],zero,zero,xmm14[2,9,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[1,8,15],zero,zero,xmm10[4,11],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm14, %xmm10, %xmm14
; AVX512-FCP-NEXT: vmovdqa %ymm13, %ymm10
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm7, %ymm1, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm7 ^ (ymm10 & (ymm1 ^ ymm7))
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm8[2],ymm10[3,4,5],ymm8[6],ymm10[7,8,9],ymm8[10],ymm10[11,12,13],ymm8[14],ymm10[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm10
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm6, %ymm16, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm10 | (ymm14 & ~mem)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm6 ^ (ymm16 & (ymm10 ^ ymm6))
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm5 ^ (ymm6 & (ymm4 ^ ymm5))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm6[2,9],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm6[0,7,14],zero,zero,xmm6[3,10,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm14, %xmm6
; AVX512-FCP-NEXT: vmovdqa %ymm11, %ymm14
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm7, %ymm1, %ymm14
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm14 = ymm7 ^ (ymm14 & (ymm1 ^ ymm7))
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm14[0,1,2],ymm8[3],ymm14[4,5],ymm8[6],ymm14[7,8,9,10],ymm8[11],ymm14[12,13],ymm8[14],ymm14[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm17 = [18446744073709551615,255,18446744073709486080,18446744073709551615]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm6, %ymm14
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm14 = ymm14 | (ymm6 & ymm17)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm3 ^ (ymm13 & (ymm2 ^ ymm3))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm6
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[1,8,15,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[u,u,0,7,14],zero,zero,xmm13[3,10],zero,zero,zero,xmm13[u,u,u,u]
@@ -5934,7 +5934,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0,1,2,3],ymm6[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm19
; AVX512-FCP-NEXT: vmovdqa %ymm11, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm3 ^ (ymm6 & (ymm2 ^ ymm3))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm12
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[u,u],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[2,9,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u]
@@ -5947,23 +5947,23 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpor %xmm13, %xmm15, %xmm13
; AVX512-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,18446744073709551615,16777215]
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm6, %ymm16, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (ymm16 & (ymm13 ^ ymm6))
; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm5 ^ (ymm6 & (ymm4 ^ ymm5))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm6[3,10],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm15, %xmm6
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm15
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm7, %ymm1, %ymm15
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm7 ^ (ymm15 & (ymm1 ^ ymm7))
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm8[0],ymm15[1,2],ymm8[3],ymm15[4,5,6],ymm8[7,8],ymm15[9,10],ymm8[11],ymm15[12,13,14],ymm8[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm6, %ymm15
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm6 & ymm17)
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm15[0],ymm13[1,2,3,4,5,6,7],ymm15[8],ymm13[9,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm20
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm3 ^ (ymm6 & (ymm2 ^ ymm3))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm6[u,u,2,9],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[0,7,14],zero,zero,xmm6[3,10,u,u,u,u,u]
@@ -5973,23 +5973,23 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
; AVX512-FCP-NEXT: vpor %xmm13, %xmm15, %xmm13
; AVX512-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm6, %ymm16, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (ymm16 & (ymm13 ^ ymm6))
; AVX512-FCP-NEXT: vmovdqa %ymm11, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm4 ^ (ymm6 & (ymm5 ^ ymm4))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm15
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[2,9],zero,zero,zero,xmm15[5,12,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[4,11],zero,zero,xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm15, %xmm6
; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm15
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm7, %ymm1, %ymm15
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm7 ^ (ymm15 & (ymm1 ^ ymm7))
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm8[0],ymm15[1,2,3],ymm8[4],ymm15[5,6],ymm8[7,8],ymm15[9,10,11],ymm8[12],ymm15[13,14],ymm8[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm6, %ymm15
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm6 & ymm17)
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm15[0],ymm13[1,2,3,4,5,6,7],ymm15[8],ymm13[9,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm6, %ymm21
; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm3 ^ (ymm6 & (ymm2 ^ ymm3))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm6[u,u,3,10],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11,u,u,u,u,u]
@@ -5999,21 +5999,21 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm14[0,7,14]
; AVX512-FCP-NEXT: vpor %xmm15, %xmm13, %xmm13
; AVX512-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm6, %ymm16, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (ymm16 & (ymm13 ^ ymm6))
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm4 ^ (ymm6 & (ymm5 ^ ymm4))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm15
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[3,10],zero,zero,zero,xmm15[6,13,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[5,12],zero,zero,xmm6[1,8,15],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm15, %xmm6
; AVX512-FCP-NEXT: vmovdqa %ymm11, %ymm15
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm7, %ymm15
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm1 ^ (ymm15 & (ymm7 ^ ymm1))
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0],ymm8[1],ymm15[2,3],ymm8[4],ymm15[5,6,7,8],ymm8[9],ymm15[10,11],ymm8[12],ymm15[13,14,15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm6, %ymm15
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm6 & ymm17)
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm15[0],ymm13[1,2,3,4,5,6,7],ymm15[8],ymm13[9,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm2 ^ (ymm11 & (ymm3 ^ ymm2))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm11, %xmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[u,u,4,11],zero,zero,xmm11[0,7,14],zero,zero,xmm11[u,u,u,u,u]
@@ -6023,16 +6023,16 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm14[1,8,15]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm11, %xmm3
; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm2, %ymm16, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm9
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm16 & (ymm3 ^ ymm2))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm4 ^ (ymm9 & (ymm5 ^ ymm4))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm9[6,13],zero,zero,xmm9[2,9],zero,zero,zero,xmm9[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm9, %xmm4
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[4,11],zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm7, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm1 ^ (ymm0 & (ymm7 ^ ymm1))
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2,3,4],ymm8[5],ymm0[6,7,8],ymm8[9],ymm0[10,11,12],ymm8[13],ymm0[14,15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm2 & ymm17)
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa64 %ymm18, (%rsi)
@@ -6053,7 +6053,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm2
; AVX512DQ-NEXT: vmovdqa 160(%rdi), %ymm3
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm2 ^ (ymm1 & (ymm3 ^ ymm2))
; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
@@ -6073,7 +6073,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm7
; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm1
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm9
-; AVX512DQ-NEXT: vpternlogq $202, %ymm7, %ymm6, %ymm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm7 ^ (ymm9 & (ymm6 ^ ymm7))
; AVX512DQ-NEXT: vextracti128 $1, %ymm9, %xmm10
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,12],zero,zero,xmm10[1,8,15,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[0,7,14],zero,zero,xmm9[3,10],zero,zero,zero,xmm9[u,u,u,u,u,u]
@@ -6081,16 +6081,16 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vmovdqa 96(%rdi), %ymm9
; AVX512DQ-NEXT: vmovdqa %ymm11, %ymm15
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm9, %ymm15
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm15 = ymm1 ^ (ymm15 & (ymm9 ^ ymm1))
; AVX512DQ-NEXT: vmovdqa 80(%rdi), %xmm10
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0,1],ymm10[2],ymm15[3,4],ymm10[5],ymm15[6,7,8,9],ymm10[10],ymm15[11,12],ymm10[13],ymm15[14,15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm8 | (ymm13 & mem)
; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,16777215,0]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm12, %ymm16, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm12 ^ (ymm16 & (ymm8 ^ ymm12))
; AVX512DQ-NEXT: vmovdqa64 %ymm8, %ymm18
; AVX512DQ-NEXT: vmovdqa %ymm11, %ymm12
-; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm2 ^ (ymm12 & (ymm3 ^ ymm2))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,6,13],zero,zero,xmm12[2,9],zero,zero,zero,xmm12[u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm12, %xmm12
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[u,u,u],zero,zero,xmm12[4,11],zero,zero,xmm12[0,7,14,u,u,u,u]
@@ -6103,31 +6103,31 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3,4,5,6],ymm8[7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512DQ-NEXT: vmovdqa %ymm13, %ymm12
-; AVX512DQ-NEXT: vpternlogq $202, %ymm7, %ymm6, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm7 ^ (ymm12 & (ymm6 ^ ymm7))
; AVX512DQ-NEXT: vextracti128 $1, %ymm12, %xmm15
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[6,13],zero,zero,xmm15[2,9,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[1,8,15],zero,zero,xmm12[4,11],zero,zero,xmm12[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm15, %xmm12, %xmm15
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm12
-; AVX512DQ-NEXT: vpternlogq $202, %ymm9, %ymm1, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm9 ^ (ymm12 & (ymm1 ^ ymm9))
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm10[2],ymm12[3,4,5],ymm10[6],ymm12[7,8,9],ymm10[10],ymm12[11,12,13],ymm10[14],ymm12[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm12
-; AVX512DQ-NEXT: vpternlogq $226, %ymm8, %ymm16, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 | (ymm15 & ~mem)
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm8 ^ (ymm16 & (ymm12 ^ ymm8))
; AVX512DQ-NEXT: vmovdqa64 %ymm12, %ymm19
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512DQ-NEXT: vpternlogq $202, %ymm7, %ymm6, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm7 ^ (ymm8 & (ymm6 ^ ymm7))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm8[2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[0,7,14],zero,zero,xmm8[3,10,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm15, %xmm8, %xmm8
; AVX512DQ-NEXT: vmovdqa %ymm13, %ymm15
-; AVX512DQ-NEXT: vpternlogq $202, %ymm9, %ymm1, %ymm15
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm15 = ymm9 ^ (ymm15 & (ymm1 ^ ymm9))
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0,1,2],ymm10[3],ymm15[4,5],ymm10[6],ymm15[7,8,9,10],ymm10[11],ymm15[12,13],ymm10[14],ymm15[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} ymm17 = [18446744073709551615,255,18446744073709486080,18446744073709551615]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm17, %ymm8, %ymm15
-; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm14
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm8 & ymm17)
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm14 = ymm3 ^ (ymm14 & (ymm2 ^ ymm3))
; AVX512DQ-NEXT: vextracti128 $1, %ymm14, %xmm8
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[1,8,15,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[u,u,0,7,14],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[u,u,u,u]
@@ -6143,7 +6143,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa64 %ymm8, %ymm20
; AVX512DQ-NEXT: vmovdqa %ymm13, %ymm8
-; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm3 ^ (ymm8 & (ymm2 ^ ymm3))
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm12
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[u,u],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[2,9,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u,1,8,15],zero,zero,xmm8[4,11],zero,zero,xmm8[u,u,u,u,u]
@@ -6154,23 +6154,23 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpor %xmm12, %xmm14, %xmm12
; AVX512DQ-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,18446744073709551615,16777215]
-; AVX512DQ-NEXT: vpternlogq $184, %ymm8, %ymm16, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm8))
; AVX512DQ-NEXT: vmovdqa %ymm11, %ymm8
-; AVX512DQ-NEXT: vpternlogq $202, %ymm7, %ymm6, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm7 ^ (ymm8 & (ymm6 ^ ymm7))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm8[3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm14, %xmm8, %xmm8
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm14
-; AVX512DQ-NEXT: vpternlogq $202, %ymm9, %ymm1, %ymm14
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm14 = ymm9 ^ (ymm14 & (ymm1 ^ ymm9))
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm14 = ymm10[0],ymm14[1,2],ymm10[3],ymm14[4,5,6],ymm10[7,8],ymm14[9,10],ymm10[11],ymm14[12,13,14],ymm10[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpternlogq $248, %ymm17, %ymm8, %ymm14
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm14 = ymm14 | (ymm8 & ymm17)
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm8 = ymm14[0],ymm12[1,2,3,4,5,6,7],ymm14[8],ymm12[9,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa64 %ymm8, %ymm21
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm3 ^ (ymm8 & (ymm2 ^ ymm3))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,xmm8[0,7,14],zero,zero,xmm8[3,10,u,u,u,u,u]
@@ -6180,22 +6180,22 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
; AVX512DQ-NEXT: vpor %xmm12, %xmm14, %xmm12
; AVX512DQ-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512DQ-NEXT: vpternlogq $184, %ymm8, %ymm16, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm8))
; AVX512DQ-NEXT: vmovdqa %ymm13, %ymm8
-; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm7, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm6 ^ (ymm8 & (ymm7 ^ ymm6))
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm14
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,xmm14[2,9],zero,zero,zero,xmm14[5,12,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm14, %xmm8, %xmm8
; AVX512DQ-NEXT: vmovdqa %ymm11, %ymm14
-; AVX512DQ-NEXT: vpternlogq $202, %ymm9, %ymm1, %ymm14
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm14 = ymm9 ^ (ymm14 & (ymm1 ^ ymm9))
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm14 = ymm10[0],ymm14[1,2,3],ymm10[4],ymm14[5,6],ymm10[7,8],ymm14[9,10,11],ymm10[12],ymm14[13,14],ymm10[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpternlogq $248, %ymm17, %ymm8, %ymm14
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm14 = ymm14 | (ymm8 & ymm17)
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm8 = ymm14[0],ymm12[1,2,3,4,5,6,7],ymm14[8],ymm12[9,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm8[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa %ymm11, %ymm8
-; AVX512DQ-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm3 ^ (ymm8 & (ymm2 ^ ymm3))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm8[u,u,3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm8
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u]
@@ -6205,21 +6205,21 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14]
; AVX512DQ-NEXT: vpor %xmm12, %xmm15, %xmm12
; AVX512DQ-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512DQ-NEXT: vpternlogq $184, %ymm8, %ymm16, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm16 & (ymm12 ^ ymm8))
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm8
-; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm7, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm6 ^ (ymm8 & (ymm7 ^ ymm6))
; AVX512DQ-NEXT: vextracti128 $1, %ymm8, %xmm15
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[3,10],zero,zero,zero,xmm15[6,13,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[5,12],zero,zero,xmm8[1,8,15],zero,zero,xmm8[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm15, %xmm8, %xmm8
; AVX512DQ-NEXT: vmovdqa %ymm13, %ymm15
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm9, %ymm15
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm15 = ymm1 ^ (ymm15 & (ymm9 ^ ymm1))
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0],ymm10[1],ymm15[2,3],ymm10[4],ymm15[5,6,7,8],ymm10[9],ymm15[10,11],ymm10[12],ymm15[13,14,15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpternlogq $248, %ymm17, %ymm8, %ymm15
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm8 & ymm17)
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm8 = ymm15[0],ymm12[1,2,3,4,5,6,7],ymm15[8],ymm12[9,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm13 = ymm2 ^ (ymm13 & (ymm3 ^ ymm2))
; AVX512DQ-NEXT: vextracti128 $1, %ymm13, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm13[u,u,4,11],zero,zero,xmm13[0,7,14],zero,zero,xmm13[u,u,u,u,u]
@@ -6229,16 +6229,16 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15]
; AVX512DQ-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpternlogq $184, %ymm2, %ymm16, %ymm3
-; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm7, %ymm11
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm16 & (ymm3 ^ ymm2))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm11 = ymm6 ^ (ymm11 & (ymm7 ^ ymm6))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm11[6,13],zero,zero,xmm11[2,9],zero,zero,zero,xmm11[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm11, %xmm4
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[4,11],zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm9, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm1 ^ (ymm0 & (ymm9 ^ ymm1))
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm10[1],ymm0[2,3,4],ymm10[5],ymm0[6,7,8],ymm10[9],ymm0[10,11,12],ymm10[13],ymm0[14,15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm2 & ymm17)
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa64 %ymm18, (%rsi)
@@ -6259,7 +6259,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm3
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm2 ^ (ymm1 & (ymm3 ^ ymm2))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
@@ -6275,7 +6275,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa %ymm13, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm5 ^ (ymm7 & (ymm4 ^ ymm5))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm8
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[5,12],zero,zero,xmm8[1,8,15,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[0,7,14],zero,zero,xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u]
@@ -6283,16 +6283,16 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %ymm7
; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm11
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm7, %ymm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm1 ^ (ymm11 & (ymm7 ^ ymm1))
; AVX512DQ-FCP-NEXT: vmovdqa 80(%rdi), %xmm8
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm8[2],ymm11[3,4],ymm8[5],ymm11[6,7,8,9],ymm8[10],ymm11[11,12],ymm8[13],ymm11[14,15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10, %ymm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 | (ymm10 & mem)
; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,16777215,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm6, %ymm16, %ymm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm6 ^ (ymm16 & (ymm11 ^ ymm6))
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm11, %ymm18
; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm2 ^ (ymm6 & (ymm3 ^ ymm2))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm6[u,u,u,6,13],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,u],zero,zero,xmm6[4,11],zero,zero,xmm6[0,7,14,u,u,u,u]
@@ -6304,30 +6304,30 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm10[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm11, %ymm10
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm5 ^ (ymm10 & (ymm4 ^ ymm5))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm10, %xmm14
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[6,13],zero,zero,xmm14[2,9,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[1,8,15],zero,zero,xmm10[4,11],zero,zero,xmm10[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm14, %xmm10, %xmm14
; AVX512DQ-FCP-NEXT: vmovdqa %ymm13, %ymm10
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm7, %ymm1, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm7 ^ (ymm10 & (ymm1 ^ ymm7))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm8[2],ymm10[3,4,5],ymm8[6],ymm10[7,8,9],ymm8[10],ymm10[11,12,13],ymm8[14],ymm10[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm10
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm6, %ymm16, %ymm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm10 | (ymm14 & ~mem)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm6 ^ (ymm16 & (ymm10 ^ ymm6))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm5 ^ (ymm6 & (ymm4 ^ ymm5))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm14 = xmm6[2,9],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm6[0,7,14],zero,zero,xmm6[3,10,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm14, %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa %ymm11, %ymm14
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm7, %ymm1, %ymm14
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm14 = ymm7 ^ (ymm14 & (ymm1 ^ ymm7))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm14[0,1,2],ymm8[3],ymm14[4,5],ymm8[6],ymm14[7,8,9,10],ymm8[11],ymm14[12,13],ymm8[14],ymm14[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm17 = [18446744073709551615,255,18446744073709486080,18446744073709551615]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm6, %ymm14
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm14 = ymm14 | (ymm6 & ymm17)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm3 ^ (ymm13 & (ymm2 ^ ymm3))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[1,8,15,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[u,u,0,7,14],zero,zero,xmm13[3,10],zero,zero,zero,xmm13[u,u,u,u]
@@ -6341,7 +6341,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm14[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm19
; AVX512DQ-FCP-NEXT: vmovdqa %ymm11, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm3 ^ (ymm6 & (ymm2 ^ ymm3))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm12
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[u,u],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[2,9,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u,1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u]
@@ -6354,23 +6354,23 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpor %xmm13, %xmm15, %xmm13
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm16 = [18446744073709551615,18446744073709551615,18446744073709551615,16777215]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm6, %ymm16, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (ymm16 & (ymm13 ^ ymm6))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm4, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm5 ^ (ymm6 & (ymm4 ^ ymm5))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm6[3,10],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm15, %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm15
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm7, %ymm1, %ymm15
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm7 ^ (ymm15 & (ymm1 ^ ymm7))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm8[0],ymm15[1,2],ymm8[3],ymm15[4,5,6],ymm8[7,8],ymm15[9,10],ymm8[11],ymm15[12,13,14],ymm8[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm6, %ymm15
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm6 & ymm17)
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm15[0],ymm13[1,2,3,4,5,6,7],ymm15[8],ymm13[9,10,11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm20
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm3 ^ (ymm6 & (ymm2 ^ ymm3))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm6[u,u,2,9],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[0,7,14],zero,zero,xmm6[3,10,u,u,u,u,u]
@@ -6380,23 +6380,23 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm13, %xmm15, %xmm13
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm6, %ymm16, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (ymm16 & (ymm13 ^ ymm6))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm11, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm4 ^ (ymm6 & (ymm5 ^ ymm4))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm15
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[2,9],zero,zero,zero,xmm15[5,12,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[4,11],zero,zero,xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm15, %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm15
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm7, %ymm1, %ymm15
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm7 ^ (ymm15 & (ymm1 ^ ymm7))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm8[0],ymm15[1,2,3],ymm8[4],ymm15[5,6],ymm8[7,8],ymm15[9,10,11],ymm8[12],ymm15[13,14],ymm8[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm6, %ymm15
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm6 & ymm17)
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm15[0],ymm13[1,2,3,4,5,6,7],ymm15[8],ymm13[9,10,11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm6, %ymm21
; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm3, %ymm2, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm3 ^ (ymm6 & (ymm2 ^ ymm3))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = xmm6[u,u,3,10],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11,u,u,u,u,u]
@@ -6406,21 +6406,21 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm14[0,7,14]
; AVX512DQ-FCP-NEXT: vpor %xmm15, %xmm13, %xmm13
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm6, %ymm16, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 ^ (ymm16 & (ymm13 ^ ymm6))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm4 ^ (ymm6 & (ymm5 ^ ymm4))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm15
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[3,10],zero,zero,zero,xmm15[6,13,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[5,12],zero,zero,xmm6[1,8,15],zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm15, %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa %ymm11, %ymm15
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm7, %ymm15
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm1 ^ (ymm15 & (ymm7 ^ ymm1))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0],ymm8[1],ymm15[2,3],ymm8[4],ymm15[5,6,7,8],ymm8[9],ymm15[10,11],ymm8[12],ymm15[13,14,15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm6, %ymm15
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm6 & ymm17)
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm15[0],ymm13[1,2,3,4,5,6,7],ymm15[8],ymm13[9,10,11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm2, %ymm3, %ymm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm2 ^ (ymm11 & (ymm3 ^ ymm2))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm11, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm11[u,u,4,11],zero,zero,xmm11[0,7,14],zero,zero,xmm11[u,u,u,u,u]
@@ -6430,16 +6430,16 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm11 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm14[1,8,15]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm11, %xmm3
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm2, %ymm16, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm4, %ymm5, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm16 & (ymm3 ^ ymm2))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm4 ^ (ymm9 & (ymm5 ^ ymm4))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm9[6,13],zero,zero,xmm9[2,9],zero,zero,zero,xmm9[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm9, %xmm4
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[4,11],zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm7, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm1 ^ (ymm0 & (ymm7 ^ ymm1))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2,3,4],ymm8[5],ymm0[6,7,8],ymm8[9],ymm0[10,11,12],ymm8[13],ymm0[14,15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm2 & ymm17)
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm18, (%rsi)
@@ -11762,7 +11762,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa64 64(%rdi), %ymm31
; AVX512-NEXT: vmovdqa %ymm0, %ymm1
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm24
-; AVX512-NEXT: vpternlogq $202, %ymm13, %ymm12, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm13 ^ (ymm1 & (ymm12 ^ ymm13))
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
@@ -11770,16 +11770,16 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-NEXT: vmovdqa64 96(%rdi), %ymm19
; AVX512-NEXT: vmovdqa %ymm9, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm31, %ymm19, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm31 ^ (ymm2 & (ymm19 ^ ymm31))
; AVX512-NEXT: vmovdqa 80(%rdi), %xmm11
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm11[2],ymm2[3,4],ymm11[5],ymm2[6,7,8,9],ymm11[10],ymm2[11,12],ymm11[13],ymm2[14,15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 | (ymm1 & mem)
; AVX512-NEXT: vmovdqa {{.*#+}} ymm14 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
; AVX512-NEXT: vmovdqa64 128(%rdi), %ymm21
; AVX512-NEXT: vmovdqa64 160(%rdi), %ymm29
; AVX512-NEXT: vmovdqa %ymm14, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm29, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm21 ^ (ymm1 & (ymm29 ^ ymm21))
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
@@ -11804,11 +11804,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpor %xmm5, %xmm7, %xmm5
; AVX512-NEXT: vinserti32x4 $2, %xmm5, %zmm1, %zmm22
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512-NEXT: vpternlogq $184, %zmm2, %zmm4, %zmm22
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm22 = zmm22 ^ (zmm4 & (zmm22 ^ zmm2))
; AVX512-NEXT: vmovdqa64 288(%rdi), %ymm18
; AVX512-NEXT: vmovdqa64 256(%rdi), %ymm16
; AVX512-NEXT: vmovdqa %ymm9, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm18, %ymm16, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm18 ^ (ymm2 & (ymm16 ^ ymm18))
; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u]
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u]
@@ -11816,27 +11816,27 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa64 352(%rdi), %ymm17
; AVX512-NEXT: vmovdqa64 320(%rdi), %ymm28
; AVX512-NEXT: vmovdqa %ymm14, %ymm7
-; AVX512-NEXT: vpternlogq $202, %ymm17, %ymm28, %ymm7
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm7 = ymm17 ^ (ymm7 & (ymm28 ^ ymm17))
; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4,5],ymm8[6],ymm7[7,8,9],ymm8[10],ymm7[11,12,13],ymm8[14],ymm7[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vmovdqa64 {{.*#+}} ymm23 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq $248, %ymm23, %ymm2, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm8 | (ymm2 & ymm23)
; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512-NEXT: vmovdqa %ymm7, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm13, %ymm12, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm13 ^ (ymm2 & (ymm12 ^ ymm13))
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm15
; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[6,13],zero,zero,xmm15[2,9,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm2, %xmm15, %xmm2
; AVX512-NEXT: vmovdqa64 %ymm24, %ymm15
; AVX512-NEXT: vmovdqa64 %ymm24, %ymm5
-; AVX512-NEXT: vpternlogq $202, %ymm19, %ymm31, %ymm15
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm15 = ymm19 ^ (ymm15 & (ymm31 ^ ymm19))
; AVX512-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0,1],ymm11[2],ymm15[3,4,5],ymm11[6],ymm15[7,8,9],ymm11[10],ymm15[11,12,13],ymm11[14],ymm15[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm15
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm2 & ~mem)
; AVX512-NEXT: vmovdqa %ymm9, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm29, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm21 ^ (ymm2 & (ymm29 ^ ymm21))
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u]
@@ -11854,23 +11854,23 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm3[7]
; AVX512-NEXT: vinserti32x4 $2, %xmm2, %zmm0, %zmm24
-; AVX512-NEXT: vpternlogq $184, %zmm15, %zmm4, %zmm24
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm24 = zmm24 ^ (zmm4 & (zmm24 ^ zmm15))
; AVX512-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm13, %ymm12, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm13 ^ (ymm0 & (ymm12 ^ ymm13))
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa %ymm7, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm19, %ymm31, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm19 ^ (ymm2 & (ymm31 ^ ymm19))
; AVX512-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm11[3],ymm2[4,5],ymm11[6],ymm2[7,8,9,10],ymm11[11],ymm2[12,13],ymm11[14],ymm2[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpmovsxdq {{.*#+}} ymm3 = [18446744073709551615,255,18446744073709486080,18446744073709551615]
-; AVX512-NEXT: vpternlogq $248, %ymm3, %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 | (ymm0 & ymm3)
; AVX512-NEXT: vmovdqa %ymm3, %ymm15
; AVX512-NEXT: vmovdqa %ymm5, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm29, %ymm21, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm29 ^ (ymm0 & (ymm21 ^ ymm29))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u]
@@ -11893,21 +11893,21 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpor %xmm3, %xmm6, %xmm3
; AVX512-NEXT: vinserti32x4 $2, %xmm3, %zmm0, %zmm25
; AVX512-NEXT: vpmovsxdq {{.*#+}} zmm20 = [0,0,18446744073709486080,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
-; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm20, %zmm25
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm25 = zmm2 ^ (zmm20 & (zmm25 ^ zmm2))
; AVX512-NEXT: vmovdqa %ymm9, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm13, %ymm12, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm13 ^ (ymm0 & (ymm12 ^ ymm13))
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa %ymm14, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm19, %ymm31, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm19 ^ (ymm2 & (ymm31 ^ ymm19))
; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm11[0],ymm2[1,2],ymm11[3],ymm2[4,5,6],ymm11[7,8],ymm2[9,10],ymm11[11],ymm2[12,13,14],ymm11[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpternlogq $248, %ymm15, %ymm0, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm0 & ymm15)
; AVX512-NEXT: vmovdqa %ymm15, %ymm11
; AVX512-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm29, %ymm21, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm29 ^ (ymm0 & (ymm21 ^ ymm29))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[2,9,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u]
@@ -11919,172 +11919,172 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vmovdqa %xmm5, %xmm10
; AVX512-NEXT: vpor %xmm6, %xmm15, %xmm6
; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm6
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (mem & (ymm6 ^ ymm0))
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,xmm4[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm0, %xmm15, %xmm0
; AVX512-NEXT: vmovdqa64 416(%rdi), %ymm26
; AVX512-NEXT: vinserti32x4 $2, %xmm0, %zmm6, %zmm30
; AVX512-NEXT: vmovdqa64 384(%rdi), %ymm27
-; AVX512-NEXT: vpternlogq $226, %zmm3, %zmm20, %zmm30
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm30 = zmm3 ^ (zmm20 & (zmm30 ^ zmm3))
; AVX512-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm26, %ymm27, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm26 ^ (ymm0 & (ymm27 ^ ymm26))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero
; AVX512-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512-NEXT: vpternlogq $184, %ymm8, %ymm1, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (ymm1 & (ymm0 ^ ymm8))
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm20
; AVX512-NEXT: vpmovsxwd {{.*#+}} zmm8 = [4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,255,0,0,0,0,0,0]
-; AVX512-NEXT: vpternlogq $184, %zmm22, %zmm8, %zmm20
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm8 & (zmm20 ^ zmm22))
; AVX512-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm16, %ymm18, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm16 ^ (ymm0 & (ymm18 ^ ymm16))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
; AVX512-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa %ymm9, %ymm3
-; AVX512-NEXT: vpternlogq $202, %ymm17, %ymm28, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm17 ^ (ymm3 & (ymm28 ^ ymm17))
; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm6[3],ymm3[4,5],ymm6[6],ymm3[7,8,9,10],ymm6[11],ymm3[12,13],ymm6[14],ymm3[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpternlogq $248, %ymm23, %ymm0, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm0 & ymm23)
; AVX512-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm26, %ymm27, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm26 ^ (ymm0 & (ymm27 ^ ymm26))
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10]
; AVX512-NEXT: vpor %xmm6, %xmm0, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-NEXT: vpternlogq $184, %ymm3, %ymm1, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (ymm1 & (ymm0 ^ ymm3))
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm22
-; AVX512-NEXT: vpternlogq $184, %zmm24, %zmm8, %zmm22
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm22 = zmm22 ^ (zmm8 & (zmm22 ^ zmm24))
; AVX512-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm16, %ymm18, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm16 ^ (ymm0 & (ymm18 ^ ymm16))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
; AVX512-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa %ymm7, %ymm3
-; AVX512-NEXT: vpternlogq $202, %ymm28, %ymm17, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm28 ^ (ymm3 & (ymm17 ^ ymm28))
; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm6[0],ymm3[1,2],ymm6[3],ymm3[4,5,6],ymm6[7,8],ymm3[9,10],ymm6[11],ymm3[12,13,14],ymm6[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpternlogq $248, %ymm23, %ymm0, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm0 & ymm23)
; AVX512-NEXT: vmovdqa %ymm9, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm26, %ymm27, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm26 ^ (ymm0 & (ymm27 ^ ymm26))
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11]
; AVX512-NEXT: vpor %xmm6, %xmm0, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-NEXT: vpternlogq $184, %ymm3, %ymm1, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (ymm1 & (ymm0 ^ ymm3))
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm24
-; AVX512-NEXT: vpternlogq $184, %zmm25, %zmm8, %zmm24
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm24 = zmm24 ^ (zmm8 & (zmm24 ^ zmm25))
; AVX512-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm28, %ymm17, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm28 ^ (ymm0 & (ymm17 ^ ymm28))
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3],ymm3[4],ymm0[5,6],ymm3[7,8],ymm0[9,10,11],ymm3[12],ymm0[13,14],ymm3[15]
; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa %ymm9, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm16, %ymm18, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm16 ^ (ymm2 & (ymm18 ^ ymm16))
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u]
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u]
; AVX512-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512-NEXT: vpternlogq $236, %ymm23, %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = (ymm2 & ymm23) | ymm0
; AVX512-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm27, %ymm26, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm27 ^ (ymm0 & (ymm26 ^ ymm27))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero
; AVX512-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-NEXT: vpternlogq $184, %ymm2, %ymm1, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (ymm1 & (ymm0 ^ ymm2))
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm25
-; AVX512-NEXT: vpternlogq $184, %zmm30, %zmm8, %zmm25
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm25 = zmm25 ^ (zmm8 & (zmm25 ^ zmm30))
; AVX512-NEXT: vmovdqa %ymm9, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm28, %ymm17, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm28 ^ (ymm0 & (ymm17 ^ ymm28))
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14,15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
; AVX512-NEXT: vmovdqa %ymm4, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm18, %ymm16, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm18 ^ (ymm2 & (ymm16 ^ ymm18))
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u]
; AVX512-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpternlogq $236, %ymm23, %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = (ymm2 & ymm23) | ymm0
; AVX512-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm27, %ymm26, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm27 ^ (ymm0 & (ymm26 ^ ymm27))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
; AVX512-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm30
-; AVX512-NEXT: vpternlogq $184, %ymm2, %ymm1, %ymm30
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm30 = ymm30 ^ (ymm1 & (ymm30 ^ ymm2))
; AVX512-NEXT: vmovdqa %ymm4, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm17, %ymm28, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm17 ^ (ymm0 & (ymm28 ^ ymm17))
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7,8],ymm2[9],ymm0[10,11,12],ymm2[13],ymm0[14,15]
; AVX512-NEXT: vmovdqa %ymm7, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm18, %ymm16, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm18 ^ (ymm2 & (ymm16 ^ ymm18))
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u]
; AVX512-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [u,u,u,u,128,128,128,128,128,128,128,128,128,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb %ymm5, %ymm0, %ymm0
-; AVX512-NEXT: vpternlogq $220, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = (ymm2 & ~mem) | ymm0
; AVX512-NEXT: vmovdqa %ymm9, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm27, %ymm26, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm27 ^ (ymm0 & (ymm26 ^ ymm27))
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
; AVX512-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm23
-; AVX512-NEXT: vpternlogq $184, %ymm2, %ymm1, %ymm23
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm23 = ymm23 ^ (ymm1 & (ymm23 ^ ymm2))
; AVX512-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm12, %ymm13, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm13 ^ ymm12))
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpternlogq $202, %ymm26, %ymm27, %ymm4
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm26 ^ (ymm4 & (ymm27 ^ ymm26))
; AVX512-NEXT: vmovdqa %ymm14, %ymm2
-; AVX512-NEXT: vpternlogq $202, %ymm12, %ymm13, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm12 ^ (ymm2 & (ymm13 ^ ymm12))
; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[5,12],zero,zero,xmm2[1,8,15],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vmovdqa %ymm9, %ymm3
; AVX512-NEXT: vmovdqa %ymm9, %ymm15
-; AVX512-NEXT: vpternlogq $202, %ymm12, %ymm13, %ymm9
-; AVX512-NEXT: vpternlogq $202, %ymm19, %ymm31, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm12 ^ (ymm9 & (ymm13 ^ ymm12))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm19 ^ (ymm3 & (ymm31 ^ ymm19))
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm9[6,13],zero,zero,xmm9[2,9],zero,zero,zero,xmm9[u,u,u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm9, %xmm9
; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[4,11],zero,zero,xmm9[0,7,14,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm6, %xmm9, %xmm6
; AVX512-NEXT: vmovdqa %ymm14, %ymm12
-; AVX512-NEXT: vpternlogq $226, %ymm18, %ymm14, %ymm16
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm16 = ymm18 ^ (ymm14 & (ymm16 ^ ymm18))
; AVX512-NEXT: vmovdqa %ymm7, %ymm9
-; AVX512-NEXT: vpternlogq $202, %ymm31, %ymm19, %ymm9
-; AVX512-NEXT: vpternlogq $202, %ymm31, %ymm19, %ymm14
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm31 ^ (ymm9 & (ymm19 ^ ymm31))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm14 = ymm31 ^ (ymm14 & (ymm19 ^ ymm31))
; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm8[0],ymm3[1,2,3],ymm8[4],ymm3[5,6],ymm8[7,8],ymm3[9,10,11],ymm8[12],ymm3[13,14],ymm8[15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vmovdqa %ymm11, %ymm1
-; AVX512-NEXT: vpternlogq $248, %ymm11, %ymm0, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm0 & ymm11)
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6,7,8],ymm8[9],ymm9[10,11],ymm8[12],ymm9[13,14,15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpternlogq $248, %ymm1, %ymm2, %ymm11
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 | (ymm2 & ymm1)
; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm14[0],ymm8[1],ymm14[2,3,4],ymm8[5],ymm14[6,7,8],ymm8[9],ymm14[10,11,12],ymm8[13],ymm14[14,15]
; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpternlogq $248, %ymm1, %ymm6, %ymm9
-; AVX512-NEXT: vpternlogq $202, %ymm29, %ymm21, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 | (ymm6 & ymm1)
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm29 ^ (ymm12 & (ymm21 ^ ymm29))
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm12[u,u,2,9],zero,zero,zero,xmm12[5,12],zero,zero,xmm12[u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm12, %xmm2
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u]
@@ -12098,11 +12098,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512-NEXT: vpmovsxdq {{.*#+}} ymm1 = [18446744073709551615,18446744073709551615,18446744073709551615,16777215]
-; AVX512-NEXT: vpternlogq $184, %ymm0, %ymm1, %ymm2
-; AVX512-NEXT: vpternlogq $226, %ymm17, %ymm7, %ymm28
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (ymm1 & (ymm2 ^ ymm0))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm28 = ymm17 ^ (ymm7 & (ymm28 ^ ymm17))
; AVX512-NEXT: vmovd {{.*#+}} xmm10 = [4,11,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512-NEXT: vpternlogq $202, %ymm29, %ymm21, %ymm15
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm29, %ymm7
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm15 = ymm29 ^ (ymm15 & (ymm21 ^ ymm29))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm7 = ymm21 ^ (ymm7 & (ymm29 ^ ymm21))
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX512-NEXT: vpshufb %xmm10, %xmm8, %xmm0
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
@@ -12110,7 +12110,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm2
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512-NEXT: vpternlogq $184, %zmm3, %zmm0, %zmm2
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm0 & (zmm2 ^ zmm3))
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm15[u,u,3,10],zero,zero,zero,xmm15[6,13],zero,zero,xmm15[u,u,u,u,u]
; AVX512-NEXT: vextracti128 $1, %ymm15, %xmm6
; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11,u,u,u,u,u]
@@ -12121,7 +12121,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpor %xmm6, %xmm12, %xmm6
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512-NEXT: vpternlogq $184, %ymm3, %ymm1, %ymm6
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 ^ (ymm1 & (ymm6 ^ ymm3))
; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm3
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,11],zero,zero,xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u]
@@ -12132,17 +12132,17 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpor %xmm13, %xmm12, %xmm12
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512-NEXT: vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512-NEXT: vpternlogq $184, %ymm3, %ymm1, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 ^ (ymm1 & (ymm12 ^ ymm3))
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm14[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm13 = xmm8[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3]
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm6, %zmm3
-; AVX512-NEXT: vpternlogq $184, %zmm11, %zmm0, %zmm3
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (zmm0 & (zmm3 ^ zmm11))
; AVX512-NEXT: vpshufb %xmm10, %xmm14, %xmm6
; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm8[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3]
; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm12, %zmm6
-; AVX512-NEXT: vpternlogq $184, %zmm9, %zmm0, %zmm6
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (zmm0 & (zmm6 ^ zmm9))
; AVX512-NEXT: vinserti64x4 $1, %ymm30, %zmm0, %zmm0
; AVX512-NEXT: movw $-512, %ax # imm = 0xFE00
; AVX512-NEXT: kmovw %eax, %k1
@@ -12158,7 +12158,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vextracti32x4 $1, %ymm16, %xmm1
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,xmm1[0,7,14],zero,zero,xmm1[3,10,u,u,u]
; AVX512-NEXT: vpor %xmm5, %xmm1, %xmm1
-; AVX512-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = (ymm1 & mem) | ymm0
; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[1,8,15]
; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,0,7,14],zero,zero,xmm4[3,10],zero,zero,zero
@@ -12188,7 +12188,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %ymm30
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm1
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm7
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm10, %ymm19, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm10 ^ (ymm1 & (ymm19 ^ ymm10))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
@@ -12196,16 +12196,16 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vmovdqa64 96(%rdi), %ymm27
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm30, %ymm27, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm30 ^ (ymm2 & (ymm27 ^ ymm30))
; AVX512-FCP-NEXT: vmovdqa 80(%rdi), %xmm9
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm9[2],ymm2[3,4],ymm9[5],ymm2[6,7,8,9],ymm9[10],ymm2[11,12],ymm9[13],ymm2[14,15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm1 & mem)
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %ymm31
; AVX512-FCP-NEXT: vmovdqa64 160(%rdi), %ymm29
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm31, %ymm29, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm31 ^ (ymm1 & (ymm29 ^ ymm31))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[6,13,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
@@ -12224,11 +12224,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpor %xmm2, %xmm5, %xmm2
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm1, %zmm20
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm4, %zmm2, %zmm20
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm2 & (zmm20 ^ zmm4))
; AVX512-FCP-NEXT: vmovdqa 288(%rdi), %ymm11
; AVX512-FCP-NEXT: vmovdqa64 256(%rdi), %ymm26
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm4
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm11, %ymm26, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm11 ^ (ymm4 & (ymm26 ^ ymm11))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm4, %xmm4
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11,u,u]
@@ -12236,27 +12236,27 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa 352(%rdi), %ymm15
; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %ymm16
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm5
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm15, %ymm16, %ymm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm15 ^ (ymm5 & (ymm16 ^ ymm15))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm5[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm14[2],ymm5[3,4,5],ymm14[6],ymm5[7,8,9],ymm14[10],ymm5[11,12,13],ymm14[14],ymm5[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} ymm21 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm21, %ymm13, %ymm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 | (ymm13 & ymm21)
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm10, %ymm19, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm10 ^ (ymm13 & (ymm19 ^ ymm10))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[1,8,15],zero,zero,xmm13[4,11],zero,zero,xmm13[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm13, %xmm3
; AVX512-FCP-NEXT: vmovdqa %ymm7, %ymm1
; AVX512-FCP-NEXT: vmovdqa %ymm7, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm27, %ymm30, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm27 ^ (ymm13 & (ymm30 ^ ymm27))
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm9[2],ymm13[3,4,5],ymm9[6],ymm13[7,8,9],ymm9[10],ymm13[11,12,13],ymm9[14],ymm13[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm13
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 | (ymm3 & ~mem)
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm31, %ymm29, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm31 ^ (ymm3 & (ymm29 ^ ymm31))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u]
@@ -12272,21 +12272,21 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[1,8,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm22
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm13, %zmm2, %zmm22
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm22 = zmm22 ^ (zmm2 & (zmm22 ^ zmm13))
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm10, %ymm19, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm10 ^ (ymm2 & (ymm19 ^ ymm10))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm27, %ymm30, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm27 ^ (ymm3 & (ymm30 ^ ymm27))
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm9[3],ymm3[4,5],ymm9[6],ymm3[7,8,9,10],ymm9[11],ymm3[12,13],ymm9[14],ymm3[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm17 = [18446744073709551615,255,18446744073709486080,18446744073709551615]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm2 & ymm17)
; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm29, %ymm31, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm29 ^ (ymm2 & (ymm31 ^ ymm29))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u],zero,zero,zero,xmm7[5,12],zero,zero,xmm7[1,8,15,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u,u,u]
@@ -12304,20 +12304,20 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm2, %zmm23
; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} zmm18 = [0,0,18446744073709486080,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm3, %zmm18, %zmm23
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm23 = zmm3 ^ (zmm18 & (zmm23 ^ zmm3))
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm10, %ymm19, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm10 ^ (ymm2 & (ymm19 ^ ymm10))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm27, %ymm30, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm27 ^ (ymm3 & (ymm30 ^ ymm27))
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm9[0],ymm3[1,2],ymm9[3],ymm3[4,5,6],ymm9[7,8],ymm3[9,10],ymm9[11],ymm3[12,13,14],ymm9[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm2 & ymm17)
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm29, %ymm31, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm29 ^ (ymm2 & (ymm31 ^ ymm29))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u],zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u]
@@ -12332,172 +12332,172 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
; AVX512-FCP-NEXT: vpor %xmm8, %xmm13, %xmm8
; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm8 ^ (mem & (ymm8 ^ ymm7))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm4[3,10],zero,zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,xmm1[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm13, %xmm7
; AVX512-FCP-NEXT: vmovdqa64 416(%rdi), %ymm24
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm8, %zmm28
; AVX512-FCP-NEXT: vmovdqa64 384(%rdi), %ymm25
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm3, %zmm18, %zmm28
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm28 = zmm3 ^ (zmm18 & (zmm28 ^ zmm3))
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm25, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm24 ^ (ymm3 & (ymm25 ^ ymm24))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u],zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm3[4,11],zero,zero
; AVX512-FCP-NEXT: vpor %xmm7, %xmm3, %xmm3
; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm5, %ymm0, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm0 & (ymm3 ^ ymm5))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm18
; AVX512-FCP-NEXT: vpmovsxwd {{.*#+}} zmm5 = [4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,255,0,0,0,0,0,0]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm20, %zmm5, %zmm18
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm18 = zmm18 ^ (zmm5 & (zmm18 ^ zmm20))
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm26, %ymm11, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm26 ^ (ymm3 & (ymm11 ^ ymm26))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u],zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,12,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,4,11],zero,zero,xmm3[0,7,14],zero,zero,xmm3[u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm3, %xmm3
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm7
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm15, %ymm16, %ymm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm15 ^ (ymm7 & (ymm16 ^ ymm15))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5],ymm8[6],ymm7[7,8,9,10],ymm8[11],ymm7[12,13],ymm8[14],ymm7[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm21, %ymm3, %ymm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm3 & ymm21)
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm25, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm24 ^ (ymm3 & (ymm25 ^ ymm24))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm3[5,12],zero,zero
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm7, %ymm0, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm0 & (ymm3 ^ ymm7))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm20
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm22, %zmm5, %zmm20
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm5 & (zmm20 ^ zmm22))
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm26, %ymm11, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm26 ^ (ymm3 & (ymm11 ^ ymm26))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u],zero,zero,xmm7[3,10],zero,zero,zero,xmm7[6,13,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero,xmm3[u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm3, %xmm3
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm7
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm16, %ymm15, %ymm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm16 ^ (ymm7 & (ymm15 ^ ymm16))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0],ymm7[1,2],ymm8[3],ymm7[4,5,6],ymm8[7,8],ymm7[9,10],ymm8[11],ymm7[12,13,14],ymm8[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm21, %ymm3, %ymm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm3 & ymm21)
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm25, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm24 ^ (ymm3 & (ymm25 ^ ymm24))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm3[6,13],zero,zero
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[1,8,15],zero,zero,xmm3[4,11]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm7, %ymm0, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm0 & (ymm3 ^ ymm7))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm22
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm23, %zmm5, %zmm22
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm22 = zmm22 ^ (zmm5 & (zmm22 ^ zmm23))
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm16, %ymm15, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm16 ^ (ymm3 & (ymm15 ^ ymm16))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm3[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm7[0],ymm3[1,2,3],ymm7[4],ymm3[5,6],ymm7[7,8],ymm3[9,10,11],ymm7[12],ymm3[13,14],ymm7[15]
; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm26, %ymm11, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm26 ^ (ymm3 & (ymm11 ^ ymm26))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[u,u,u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm21, %ymm2, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ymm21) | ymm2
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm25, %ymm24, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm24 ^ ymm25))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u],zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,12]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero
; AVX512-FCP-NEXT: vpor %xmm7, %xmm2, %xmm2
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm3, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (ymm0 & (ymm2 ^ ymm3))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm23
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm28, %zmm5, %zmm23
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm5 & (zmm23 ^ zmm28))
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm16, %ymm15, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm16 ^ (ymm2 & (ymm15 ^ ymm16))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6,7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13,14,15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm11, %ymm26, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm11 ^ (ymm3 & (ymm26 ^ ymm11))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,0,7,14],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[u,u]
; AVX512-FCP-NEXT: vpor %xmm5, %xmm3, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512-FCP-NEXT: vpternlogq $236, %ymm21, %ymm2, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ymm21) | ymm2
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm25, %ymm24, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm24 ^ ymm25))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm5
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero
; AVX512-FCP-NEXT: vpor %xmm5, %xmm2, %xmm2
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm4
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm3, %ymm0, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 ^ (ymm0 & (ymm4 ^ ymm3))
; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm2
; AVX512-FCP-NEXT: vmovdqa64 %ymm1, %ymm21
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm15, %ymm16, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm15 ^ (ymm2 & (ymm16 ^ ymm15))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4],ymm3[5],ymm2[6,7,8],ymm3[9],ymm2[10,11,12],ymm3[13],ymm2[14,15]
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm11, %ymm26, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm11 ^ (ymm3 & (ymm26 ^ ymm11))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,1,8,15],zero,zero,xmm3[4,11],zero,zero,xmm3[u,u,u]
; AVX512-FCP-NEXT: vpor %xmm5, %xmm3, %xmm3
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [u,u,u,u,128,128,128,128,128,128,128,128,128,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $220, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ~mem) | ymm2
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm25, %ymm24, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm24 ^ ymm25))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm2, %xmm2
; AVX512-FCP-NEXT: vinserti32x4 $1, %xmm2, %ymm0, %ymm28
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm3, %ymm0, %ymm28
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm28 = ymm28 ^ (ymm0 & (ymm28 ^ ymm3))
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm19, %ymm10, %ymm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm19 ^ (ymm2 & (ymm10 ^ ymm19))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm25, %ymm21
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm21 = ymm24 ^ (ymm21 & (ymm25 ^ ymm24))
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm19, %ymm10, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm19 ^ (ymm3 & (ymm10 ^ ymm19))
; AVX512-FCP-NEXT: vextracti128 $1, %ymm3, %xmm7
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[3,10],zero,zero,zero,xmm7[6,13,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[5,12],zero,zero,xmm3[1,8,15],zero,zero,xmm3[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm7, %xmm3, %xmm3
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm7
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm19, %ymm10, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm27, %ymm30, %ymm7
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm19 ^ (ymm6 & (ymm10 ^ ymm19))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm27 ^ (ymm7 & (ymm30 ^ ymm27))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm6[6,13],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm6[4,11],zero,zero,xmm6[0,7,14,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm6, %xmm8, %xmm8
; AVX512-FCP-NEXT: vmovdqa %ymm12, %ymm10
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm11, %ymm12, %ymm26
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm26 = ymm11 ^ (ymm12 & (ymm26 ^ ymm11))
; AVX512-FCP-NEXT: vmovdqa %ymm14, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm30, %ymm27, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm30, %ymm27, %ymm12
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm30 ^ (ymm6 & (ymm27 ^ ymm30))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm30 ^ (ymm12 & (ymm27 ^ ymm30))
; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm0
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm9[0],ymm7[1,2,3],ymm9[4],ymm7[5,6],ymm9[7,8],ymm7[9,10,11],ymm9[12],ymm7[13,14],ymm9[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm9
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 | (ymm2 & ymm17)
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm6[0],ymm0[1],ymm6[2,3],ymm0[4],ymm6[5,6,7,8],ymm0[9],ymm6[10,11],ymm0[12],ymm6[13,14,15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm3, %ymm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm6 | (ymm3 & ymm17)
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm12[0],ymm0[1],ymm12[2,3,4],ymm0[5],ymm12[6,7,8],ymm0[9],ymm12[10,11,12],ymm0[13],ymm12[14,15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-FCP-NEXT: vpternlogq $248, %ymm17, %ymm8, %ymm7
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm29, %ymm31, %ymm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm8 & ymm17)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm10 = ymm29 ^ (ymm10 & (ymm31 ^ ymm29))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm10[u,u,2,9],zero,zero,zero,xmm10[5,12],zero,zero,xmm10[u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm10, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10,u,u,u,u,u]
@@ -12511,11 +12511,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512-FCP-NEXT: vpmovsxdq {{.*#+}} ymm11 = [18446744073709551615,18446744073709551615,18446744073709551615,16777215]
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm2, %ymm11, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm15, %ymm14, %ymm16
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm11 & (ymm3 ^ ymm2))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm16 = ymm15 ^ (ymm14 & (ymm16 ^ ymm15))
; AVX512-FCP-NEXT: vmovd {{.*#+}} xmm8 = [4,11,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm29, %ymm31, %ymm13
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm31, %ymm29, %ymm14
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm29 ^ (ymm13 & (ymm31 ^ ymm29))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm14 = ymm31 ^ (ymm14 & (ymm29 ^ ymm31))
; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm0, %xmm2
; AVX512-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
@@ -12523,7 +12523,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm9, %zmm3, %zmm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm3 & (zmm2 ^ zmm9))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm13[u,u,3,10],zero,zero,zero,xmm13[6,13],zero,zero,xmm13[u,u,u,u,u]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm13, %xmm10
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[u,u],zero,zero,xmm10[1,8,15],zero,zero,xmm10[4,11,u,u,u,u,u]
@@ -12533,7 +12533,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpor %xmm5, %xmm10, %xmm5
; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
; AVX512-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm9, %ymm11, %ymm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (ymm11 & (ymm5 ^ ymm9))
; AVX512-FCP-NEXT: vmovdqa %ymm11, %ymm13
; AVX512-FCP-NEXT: vextracti128 $1, %ymm14, %xmm9
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[u,u],zero,zero,xmm9[2,9],zero,zero,zero,xmm9[5,12,u,u,u,u,u]
@@ -12545,17 +12545,17 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpor %xmm12, %xmm11, %xmm11
; AVX512-FCP-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
; AVX512-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512-FCP-NEXT: vpternlogq $184, %ymm9, %ymm13, %ymm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 ^ (ymm13 & (ymm11 ^ ymm9))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm9 = xmm15[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm5, %zmm5
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm6, %zmm3, %zmm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm3 & (zmm5 ^ zmm6))
; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm15, %xmm6
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm11, %zmm6
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm7, %zmm3, %zmm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (zmm3 & (zmm6 ^ zmm7))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm3
; AVX512-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
; AVX512-FCP-NEXT: kmovw %eax, %k1
@@ -12571,7 +12571,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vextracti32x4 $1, %ymm26, %xmm1
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,xmm1[0,7,14],zero,zero,xmm1[3,10,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm4, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = (ymm1 & mem) | ymm3
; AVX512-FCP-NEXT: vmovdqa64 %ymm21, %ymm0
; AVX512-FCP-NEXT: vextracti32x4 $1, %ymm21, %xmm3
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15]
@@ -12603,7 +12603,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %ymm31
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-NEXT: vpternlogq $202, %ymm13, %ymm12, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm13 ^ (ymm1 & (ymm12 ^ ymm13))
; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
@@ -12611,16 +12611,16 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vmovdqa64 96(%rdi), %ymm28
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm31, %ymm28, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm31 ^ (ymm2 & (ymm28 ^ ymm31))
; AVX512DQ-NEXT: vmovdqa 80(%rdi), %xmm11
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm11[2],ymm2[3,4],ymm11[5],ymm2[6,7,8,9],ymm11[10],ymm2[11,12],ymm11[13],ymm2[14,15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 | (ymm1 & mem)
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm14 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa 160(%rdi), %ymm3
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm0, %ymm3, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm1 & (ymm3 ^ ymm0))
; AVX512DQ-NEXT: vmovdqa %ymm3, %ymm4
; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm25
; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm3
@@ -12647,11 +12647,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpor %xmm6, %xmm7, %xmm6
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm6, %zmm1, %zmm22
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm19 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm2, %zmm19, %zmm22
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm22 = zmm22 ^ (zmm19 & (zmm22 ^ zmm2))
; AVX512DQ-NEXT: vmovdqa64 288(%rdi), %ymm18
; AVX512DQ-NEXT: vmovdqa64 256(%rdi), %ymm17
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm18, %ymm17, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm18 ^ (ymm2 & (ymm17 ^ ymm18))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u]
@@ -12659,28 +12659,28 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vmovdqa64 352(%rdi), %ymm21
; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %ymm16
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm7
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm16, %ymm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm7 = ymm21 ^ (ymm7 & (ymm16 ^ ymm21))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3,4,5],ymm8[6],ymm7[7,8,9],ymm8[10],ymm7[11,12,13],ymm8[14],ymm7[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} ymm24 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm24, %ymm2, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm8 | (ymm2 & ymm24)
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm13, %ymm12, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm13 ^ (ymm2 & (ymm12 ^ ymm13))
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm15
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[6,13],zero,zero,xmm15[2,9,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm2, %xmm15, %xmm2
; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm15
; AVX512DQ-NEXT: vmovdqa64 %ymm23, %ymm29
-; AVX512DQ-NEXT: vpternlogq $202, %ymm28, %ymm31, %ymm15
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm15 = ymm28 ^ (ymm15 & (ymm31 ^ ymm28))
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm15 = ymm15[0,1],ymm11[2],ymm15[3,4,5],ymm11[6],ymm15[7,8,9],ymm11[10],ymm15[11,12,13],ymm11[14],ymm15[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm15
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm15 = ymm15 | (ymm2 & ~mem)
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm2
; AVX512DQ-NEXT: vmovdqa %ymm4, %ymm6
-; AVX512DQ-NEXT: vpternlogq $202, %ymm25, %ymm4, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm4 ^ ymm25))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u]
@@ -12698,25 +12698,25 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm3[7]
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm2, %zmm0, %zmm23
-; AVX512DQ-NEXT: vpternlogq $184, %zmm15, %zmm19, %zmm23
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm19 & (zmm23 ^ zmm15))
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm13, %ymm12, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm13 ^ (ymm0 & (ymm12 ^ ymm13))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm28, %ymm31, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm28 ^ (ymm2 & (ymm31 ^ ymm28))
; AVX512DQ-NEXT: vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm11[3],ymm2[4,5],ymm11[6],ymm2[7,8,9,10],ymm11[11],ymm2[12,13],ymm11[14],ymm2[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} ymm3 = [18446744073709551615,255,18446744073709486080,18446744073709551615]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm3, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 | (ymm0 & ymm3)
; AVX512DQ-NEXT: vmovdqa64 %ymm3, %ymm27
; AVX512DQ-NEXT: vmovdqa64 %ymm29, %ymm0
; AVX512DQ-NEXT: vmovdqa64 %ymm25, %ymm15
; AVX512DQ-NEXT: vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm25, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm6 ^ (ymm0 & (ymm25 ^ ymm6))
; AVX512DQ-NEXT: vmovdqa64 %ymm6, %ymm19
; AVX512DQ-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -12740,21 +12740,21 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm3, %zmm0, %zmm25
; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} zmm20 = [0,0,18446744073709486080,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm20, %zmm25
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm25 = zmm2 ^ (zmm20 & (zmm25 ^ zmm2))
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm13, %ymm12, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm13 ^ (ymm0 & (ymm12 ^ ymm13))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm28, %ymm31, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm28 ^ (ymm2 & (ymm31 ^ ymm28))
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm11[0],ymm2[1,2],ymm11[3],ymm2[4,5,6],ymm11[7,8],ymm2[9,10],ymm11[11],ymm2[12,13,14],ymm11[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpternlogq $248, %ymm27, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm0 & ymm27)
; AVX512DQ-NEXT: vmovdqa64 %ymm27, %ymm11
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm19, %ymm15, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm19 ^ (ymm0 & (ymm15 ^ ymm19))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[2,9,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u]
@@ -12765,174 +12765,174 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = xmm6[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
; AVX512DQ-NEXT: vpor %xmm5, %xmm15, %xmm5
; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (mem & (ymm5 ^ ymm0))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm15 = zero,zero,xmm4[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm0, %xmm15, %xmm0
; AVX512DQ-NEXT: vmovdqa64 416(%rdi), %ymm26
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm0, %zmm5, %zmm30
; AVX512DQ-NEXT: vmovdqa64 384(%rdi), %ymm27
-; AVX512DQ-NEXT: vpternlogq $226, %zmm3, %zmm20, %zmm30
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm30 = zmm3 ^ (zmm20 & (zmm30 ^ zmm3))
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm26, %ymm27, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm26 ^ (ymm0 & (ymm27 ^ ymm26))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero
; AVX512DQ-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} ymm29 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-NEXT: vpternlogq $184, %ymm8, %ymm29, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (ymm29 & (ymm0 ^ ymm8))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm20
; AVX512DQ-NEXT: vpmovsxwd {{.*#+}} zmm8 = [4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,255,0,0,0,0,0,0]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm22, %zmm8, %zmm20
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm8 & (zmm20 ^ zmm22))
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm17, %ymm18, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm17 ^ (ymm0 & (ymm18 ^ ymm17))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
; AVX512DQ-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm3
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm16, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm21 ^ (ymm3 & (ymm16 ^ ymm21))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm3[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8,9,10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm24, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm0 & ymm24)
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm26, %ymm27, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm26 ^ (ymm0 & (ymm27 ^ ymm26))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm0[5,12],zero,zero
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10]
; AVX512DQ-NEXT: vpor %xmm5, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpternlogq $184, %ymm3, %ymm29, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (ymm29 & (ymm0 ^ ymm3))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm22
-; AVX512DQ-NEXT: vpternlogq $184, %zmm23, %zmm8, %zmm22
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm22 = zmm22 ^ (zmm8 & (zmm22 ^ zmm23))
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm17, %ymm18, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm17 ^ (ymm0 & (ymm18 ^ ymm17))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
; AVX512DQ-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm3
-; AVX512DQ-NEXT: vpternlogq $202, %ymm16, %ymm21, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm16 ^ (ymm3 & (ymm21 ^ ymm16))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm3[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1,2],ymm5[3],ymm3[4,5,6],ymm5[7,8],ymm3[9,10],ymm5[11],ymm3[12,13,14],ymm5[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpternlogq $248, %ymm24, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm0 & ymm24)
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm26, %ymm27, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm26 ^ (ymm0 & (ymm27 ^ ymm26))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm0[6,13],zero,zero
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11]
; AVX512DQ-NEXT: vpor %xmm5, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpternlogq $184, %ymm3, %ymm29, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (ymm29 & (ymm0 ^ ymm3))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm23
-; AVX512DQ-NEXT: vpternlogq $184, %zmm25, %zmm8, %zmm23
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm23 = zmm23 ^ (zmm8 & (zmm23 ^ zmm25))
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm16, %ymm21, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm16 ^ (ymm0 & (ymm21 ^ ymm16))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm0[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3],ymm3[4],ymm0[5,6],ymm3[7,8],ymm0[9,10,11],ymm3[12],ymm0[13,14],ymm3[15]
; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm17, %ymm18, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm17 ^ (ymm2 & (ymm18 ^ ymm17))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u]
; AVX512DQ-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX512DQ-NEXT: vpternlogq $236, %ymm24, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = (ymm2 & ymm24) | ymm0
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm27, %ymm26, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm27 ^ (ymm0 & (ymm26 ^ ymm27))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero
; AVX512DQ-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpternlogq $184, %ymm2, %ymm29, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (ymm29 & (ymm0 ^ ymm2))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm25
-; AVX512DQ-NEXT: vpternlogq $184, %zmm30, %zmm8, %zmm25
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm25 = zmm25 ^ (zmm8 & (zmm25 ^ zmm30))
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm16, %ymm21, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm16 ^ (ymm0 & (ymm21 ^ ymm16))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14,15]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} ymm19 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm18, %ymm17, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm18 ^ (ymm2 & (ymm17 ^ ymm18))
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u]
; AVX512DQ-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-NEXT: vpternlogq $236, %ymm24, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = (ymm2 & ymm24) | ymm0
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm27, %ymm26, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm27 ^ (ymm0 & (ymm26 ^ ymm27))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
; AVX512DQ-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm24
-; AVX512DQ-NEXT: vpternlogq $184, %ymm2, %ymm29, %ymm24
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm24 = ymm24 ^ (ymm29 & (ymm24 ^ ymm2))
; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm16, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm21 ^ (ymm0 & (ymm16 ^ ymm21))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7,8],ymm2[9],ymm0[10,11,12],ymm2[13],ymm0[14,15]
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm18, %ymm17, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm18 ^ (ymm2 & (ymm17 ^ ymm18))
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u]
; AVX512DQ-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [u,u,u,u,128,128,128,128,128,128,128,128,128,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb %ymm8, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpternlogq $220, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = (ymm2 & ~mem) | ymm0
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm27, %ymm26, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm27 ^ (ymm0 & (ymm26 ^ ymm27))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
; AVX512DQ-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm30
-; AVX512DQ-NEXT: vpternlogq $184, %ymm2, %ymm29, %ymm30
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm30 = ymm30 ^ (ymm29 & (ymm30 ^ ymm2))
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm12, %ymm13, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm13 ^ ymm12))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vporq %xmm2, %xmm0, %xmm29
-; AVX512DQ-NEXT: vpternlogq $202, %ymm26, %ymm27, %ymm19
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm19 = ymm26 ^ (ymm19 & (ymm27 ^ ymm26))
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm12, %ymm13, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm12 ^ (ymm0 & (ymm13 ^ ymm12))
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[3,10],zero,zero,zero,xmm2[6,13,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm2
; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm15
-; AVX512DQ-NEXT: vpternlogq $202, %ymm12, %ymm13, %ymm9
-; AVX512DQ-NEXT: vpternlogq $202, %ymm28, %ymm31, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm12 ^ (ymm9 & (ymm13 ^ ymm12))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm28 ^ (ymm2 & (ymm31 ^ ymm28))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm9[6,13],zero,zero,xmm9[2,9],zero,zero,zero,xmm9[u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm9, %xmm5
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[4,11],zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX512DQ-NEXT: vmovdqa %ymm14, %ymm5
-; AVX512DQ-NEXT: vpternlogq $226, %ymm18, %ymm14, %ymm17
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm17 = ymm18 ^ (ymm14 & (ymm17 ^ ymm18))
; AVX512DQ-NEXT: vmovdqa %ymm7, %ymm9
-; AVX512DQ-NEXT: vpternlogq $202, %ymm31, %ymm28, %ymm9
-; AVX512DQ-NEXT: vpternlogq $202, %ymm31, %ymm28, %ymm14
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm31 ^ (ymm9 & (ymm28 ^ ymm31))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm14 = ymm31 ^ (ymm14 & (ymm28 ^ ymm31))
; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3],ymm4[4],ymm2[5,6],ymm4[7,8],ymm2[9,10,11],ymm4[12],ymm2[13,14],ymm4[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-NEXT: vmovdqa %ymm11, %ymm1
-; AVX512DQ-NEXT: vpternlogq $248, %ymm11, %ymm29, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm12 | (ymm29 & ymm11)
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm9[0],ymm4[1],ymm9[2,3],ymm4[4],ymm9[5,6,7,8],ymm4[9],ymm9[10,11],ymm4[12],ymm9[13,14,15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpternlogq $248, %ymm1, %ymm0, %ymm11
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 | (ymm0 & ymm1)
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm14[0],ymm4[1],ymm14[2,3,4],ymm4[5],ymm14[6,7,8],ymm4[9],ymm14[10,11,12],ymm4[13],ymm14[14,15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-NEXT: vpternlogq $248, %ymm1, %ymm3, %ymm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 | (ymm3 & ymm1)
; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX512DQ-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm5 = ymm6 ^ (ymm5 & (ymm13 ^ ymm6))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm5[u,u,2,9],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm5, %xmm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u]
@@ -12946,11 +12946,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-NEXT: vpmovsxdq {{.*#+}} ymm18 = [18446744073709551615,18446744073709551615,18446744073709551615,16777215]
-; AVX512DQ-NEXT: vpternlogq $184, %ymm0, %ymm18, %ymm2
-; AVX512DQ-NEXT: vpternlogq $226, %ymm21, %ymm7, %ymm16
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (ymm18 & (ymm2 ^ ymm0))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm16 = ymm21 ^ (ymm7 & (ymm16 ^ ymm21))
; AVX512DQ-NEXT: vmovd {{.*#+}} xmm10 = [4,11,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm13, %ymm15
-; AVX512DQ-NEXT: vpternlogq $202, %ymm13, %ymm6, %ymm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm15 = ymm6 ^ (ymm15 & (ymm13 ^ ymm6))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm7 = ymm13 ^ (ymm7 & (ymm6 ^ ymm13))
; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX512DQ-NEXT: vpshufb %xmm10, %xmm14, %xmm0
; AVX512DQ-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
@@ -12958,7 +12958,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm2
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm12, %zmm0, %zmm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm0 & (zmm2 ^ zmm12))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm15[u,u,3,10],zero,zero,zero,xmm15[6,13],zero,zero,xmm15[u,u,u,u,u]
; AVX512DQ-NEXT: vextracti128 $1, %ymm15, %xmm5
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[1,8,15],zero,zero,xmm5[4,11,u,u,u,u,u]
@@ -12968,7 +12968,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpor %xmm5, %xmm8, %xmm5
; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-NEXT: vpternlogq $184, %ymm3, %ymm18, %ymm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (ymm18 & (ymm5 ^ ymm3))
; AVX512DQ-NEXT: vextracti128 $1, %ymm7, %xmm3
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,11],zero,zero,xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u]
@@ -12979,17 +12979,17 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpor %xmm12, %xmm8, %xmm8
; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-NEXT: vpternlogq $184, %ymm3, %ymm18, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm8 ^ (ymm18 & (ymm8 ^ ymm3))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm13[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm14[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm5, %zmm3
-; AVX512DQ-NEXT: vpternlogq $184, %zmm11, %zmm0, %zmm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (zmm0 & (zmm3 ^ zmm11))
; AVX512DQ-NEXT: vpshufb %xmm10, %xmm13, %xmm5
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm8, %zmm5
-; AVX512DQ-NEXT: vpternlogq $184, %zmm9, %zmm0, %zmm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm0 & (zmm5 ^ zmm9))
; AVX512DQ-NEXT: movw $-512, %ax # imm = 0xFE00
; AVX512DQ-NEXT: kmovw %eax, %k1
; AVX512DQ-NEXT: vinserti32x8 $1, %ymm24, %zmm0, %zmm2 {%k1}
@@ -13003,7 +13003,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vextracti32x4 $1, %ymm17, %xmm1
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,xmm1[0,7,14],zero,zero,xmm1[3,10,u,u,u]
; AVX512DQ-NEXT: vpor %xmm6, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = (ymm1 & mem) | ymm0
; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm4
; AVX512DQ-NEXT: vextracti32x4 $1, %ymm19, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[1,8,15]
@@ -13034,7 +13034,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %ymm30
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm11, %ymm26, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm11 ^ (ymm1 & (ymm26 ^ ymm11))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
@@ -13042,16 +13042,16 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa64 96(%rdi), %ymm29
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm30, %ymm29, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm30 ^ (ymm2 & (ymm29 ^ ymm30))
; AVX512DQ-FCP-NEXT: vmovdqa 80(%rdi), %xmm9
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm9[2],ymm2[3,4],ymm9[5],ymm2[6,7,8,9],ymm9[10],ymm2[11,12],ymm9[13],ymm2[14,15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 | (ymm1 & mem)
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa64 160(%rdi), %ymm31
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm0, %ymm31, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm0 ^ (ymm1 & (ymm31 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm24
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[6,13,u,u,u,u]
@@ -13071,11 +13071,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm5, %xmm2
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm2, %zmm1, %zmm20
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm4, %zmm2, %zmm20
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm2 & (zmm20 ^ zmm4))
; AVX512DQ-FCP-NEXT: vmovdqa 288(%rdi), %ymm10
; AVX512DQ-FCP-NEXT: vmovdqa64 256(%rdi), %ymm19
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm4
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm10, %ymm19, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm10 ^ (ymm4 & (ymm19 ^ ymm10))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm4, %xmm4
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11,u,u]
@@ -13083,27 +13083,27 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa 352(%rdi), %ymm15
; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %ymm16
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm5
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm15, %ymm16, %ymm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm15 ^ (ymm5 & (ymm16 ^ ymm15))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm5[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm14[2],ymm5[3,4,5],ymm14[6],ymm5[7,8,9],ymm14[10],ymm5[11,12,13],ymm14[14],ymm5[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} ymm23 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm23, %ymm13, %ymm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 | (ymm13 & ymm23)
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm14 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm11, %ymm26, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm11 ^ (ymm13 & (ymm26 ^ ymm11))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[1,8,15],zero,zero,xmm13[4,11],zero,zero,xmm13[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm13, %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa %ymm7, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm29, %ymm30, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm29 ^ (ymm13 & (ymm30 ^ ymm29))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm9[2],ymm13[3,4,5],ymm9[6],ymm13[7,8,9],ymm9[10],ymm13[11,12,13],ymm9[14],ymm13[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm13
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm13 | (ymm3 & ~mem)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm31, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm24 ^ (ymm3 & (ymm31 ^ ymm24))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u]
@@ -13119,22 +13119,22 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm0[1,8,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm21
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm13, %zmm2, %zmm21
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm21 = zmm21 ^ (zmm2 & (zmm21 ^ zmm13))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm11, %ymm26, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm11 ^ (ymm2 & (ymm26 ^ ymm11))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm29, %ymm30, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm29 ^ (ymm3 & (ymm30 ^ ymm29))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm9[3],ymm3[4,5],ymm9[6],ymm3[7,8,9,10],ymm9[11],ymm3[12,13],ymm9[14],ymm3[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm17 = [18446744073709551615,255,18446744073709486080,18446744073709551615]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm2 & ymm17)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqu64 %ymm24, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm31, %ymm24, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm31 ^ (ymm2 & (ymm24 ^ ymm31))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u],zero,zero,zero,xmm7[5,12],zero,zero,xmm7[1,8,15,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u,u,u]
@@ -13152,20 +13152,20 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm8, %xmm7
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm2, %zmm22
; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} zmm18 = [0,0,18446744073709486080,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm3, %zmm18, %zmm22
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm22 = zmm3 ^ (zmm18 & (zmm22 ^ zmm3))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm11, %ymm26, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm11 ^ (ymm2 & (ymm26 ^ ymm11))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm29, %ymm30, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm29 ^ (ymm3 & (ymm30 ^ ymm29))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm9[0],ymm3[1,2],ymm9[3],ymm3[4,5,6],ymm9[7,8],ymm3[9,10],ymm9[11],ymm3[12,13,14],ymm9[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm2 & ymm17)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm31, %ymm24, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm31 ^ (ymm2 & (ymm24 ^ ymm31))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u],zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u]
@@ -13180,173 +13180,173 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm8, %xmm13, %xmm8
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm8 ^ (mem & (ymm8 ^ ymm7))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm4[3,10],zero,zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = zero,zero,xmm1[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm13, %xmm7
; AVX512DQ-FCP-NEXT: vmovdqa64 416(%rdi), %ymm24
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm7, %zmm8, %zmm28
; AVX512DQ-FCP-NEXT: vmovdqa64 384(%rdi), %ymm25
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm3, %zmm18, %zmm28
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm28 = zmm3 ^ (zmm18 & (zmm28 ^ zmm3))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm25, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm24 ^ (ymm3 & (ymm25 ^ ymm24))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u],zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm3[4,11],zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} ymm27 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm5, %ymm27, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm27 & (ymm3 ^ ymm5))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm18
; AVX512DQ-FCP-NEXT: vpmovsxwd {{.*#+}} zmm5 = [4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,4294967295,255,0,0,0,0,0,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm20, %zmm5, %zmm18
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm18 = zmm18 ^ (zmm5 & (zmm18 ^ zmm20))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm19, %ymm10, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm19 ^ (ymm3 & (ymm10 ^ ymm19))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u],zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,12,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,4,11],zero,zero,xmm3[0,7,14],zero,zero,xmm3[u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm15, %ymm16, %ymm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm15 ^ (ymm7 & (ymm16 ^ ymm15))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5],ymm8[6],ymm7[7,8,9,10],ymm8[11],ymm7[12,13],ymm8[14],ymm7[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm23, %ymm3, %ymm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm3 & ymm23)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm25, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm24 ^ (ymm3 & (ymm25 ^ ymm24))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm3[5,12],zero,zero
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm7, %ymm27, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm27 & (ymm3 ^ ymm7))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm20
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm21, %zmm5, %zmm20
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (zmm5 & (zmm20 ^ zmm21))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm19, %ymm10, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm19 ^ (ymm3 & (ymm10 ^ ymm19))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u],zero,zero,xmm7[3,10],zero,zero,zero,xmm7[6,13,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero,xmm3[u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm16, %ymm15, %ymm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm16 ^ (ymm7 & (ymm15 ^ ymm16))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm8[0],ymm7[1,2],ymm8[3],ymm7[4,5,6],ymm8[7,8],ymm7[9,10],ymm8[11],ymm7[12,13,14],ymm8[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm23, %ymm3, %ymm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm3 & ymm23)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm25, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm24 ^ (ymm3 & (ymm25 ^ ymm24))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm3[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm3[6,13],zero,zero
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[1,8,15],zero,zero,xmm3[4,11]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm8, %xmm3
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm7, %ymm27, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (ymm27 & (ymm3 ^ ymm7))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm21
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm22, %zmm5, %zmm21
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm21 = zmm21 ^ (zmm5 & (zmm21 ^ zmm22))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm16, %ymm15, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm16 ^ (ymm3 & (ymm15 ^ ymm16))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm3[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm7[0],ymm3[1,2,3],ymm7[4],ymm3[5,6],ymm7[7,8],ymm3[9,10,11],ymm7[12],ymm3[13,14],ymm7[15]
; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm3, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm19, %ymm10, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm19 ^ (ymm3 & (ymm10 ^ ymm19))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm3[u,u,u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm23, %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ymm23) | ymm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm25, %ymm24, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm24 ^ ymm25))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm7
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u],zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,12]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm2, %xmm2
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm3, %ymm27, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (ymm27 & (ymm2 ^ ymm3))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm22
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm28, %zmm5, %zmm22
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm22 = zmm22 ^ (zmm5 & (zmm22 ^ zmm28))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm16, %ymm15, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm16 ^ (ymm2 & (ymm15 ^ ymm16))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6,7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13,14,15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm10, %ymm19, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm10 ^ (ymm3 & (ymm19 ^ ymm10))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,0,7,14],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpternlogq $236, %ymm23, %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ymm23) | ymm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm25, %ymm24, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm24 ^ ymm25))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm5
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero
; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm2, %xmm2
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm3, %ymm27, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (ymm27 & (ymm1 ^ ymm3))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm15, %ymm16, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm15 ^ (ymm2 & (ymm16 ^ ymm15))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4],ymm3[5],ymm2[6,7,8],ymm3[9],ymm2[10,11,12],ymm3[13],ymm2[14,15]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm10, %ymm19, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm10 ^ (ymm3 & (ymm19 ^ ymm10))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm3, %xmm5
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,1,8,15],zero,zero,xmm3[4,11],zero,zero,xmm3[u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm5 = [u,u,u,u,128,128,128,128,128,128,128,128,128,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb %ymm5, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $220, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = (ymm3 & ~mem) | ymm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm25, %ymm24, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm25 ^ (ymm2 & (ymm24 ^ ymm25))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm2, %xmm2
; AVX512DQ-FCP-NEXT: vinserti32x4 $1, %xmm2, %ymm0, %ymm28
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm3, %ymm27, %ymm28
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm28 = ymm28 ^ (ymm27 & (ymm28 ^ ymm3))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm26, %ymm11, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm26 ^ (ymm2 & (ymm11 ^ ymm26))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vporq %xmm3, %xmm2, %xmm27
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm25, %ymm23
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm23 = ymm24 ^ (ymm23 & (ymm25 ^ ymm24))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm26, %ymm11, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm2 = ymm26 ^ (ymm2 & (ymm11 ^ ymm26))
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[5,12],zero,zero,xmm2[1,8,15],zero,zero,xmm2[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm3
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm26, %ymm11, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm29, %ymm30, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm6 = ymm26 ^ (ymm6 & (ymm11 ^ ymm26))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm29 ^ (ymm3 & (ymm30 ^ ymm29))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm6[6,13],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm6[4,11],zero,zero,xmm6[0,7,14,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm7, %xmm6, %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa %ymm12, %ymm8
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm10, %ymm12, %ymm19
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm19 = ymm10 ^ (ymm12 & (ymm19 ^ ymm10))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm14, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm30, %ymm29, %ymm7
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm30, %ymm29, %ymm12
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm30 ^ (ymm7 & (ymm29 ^ ymm30))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm12 = ymm30 ^ (ymm12 & (ymm29 ^ ymm30))
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm9[0],ymm3[1,2,3],ymm9[4],ymm3[5,6],ymm9[7,8],ymm3[9,10,11],ymm9[12],ymm3[13,14],ymm9[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm27, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 | (ymm27 & ymm17)
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm7 = ymm7[0],ymm9[1],ymm7[2,3],ymm9[4],ymm7[5,6,7,8],ymm9[9],ymm7[10,11],ymm9[12],ymm7[13,14,15]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm0
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm2, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 | (ymm2 & ymm17)
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm12[0],ymm0[1],ymm12[2,3,4],ymm0[5],ymm12[6,7,8],ymm0[9],ymm12[10,11,12],ymm0[13],ymm12[14,15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %ymm17, %ymm6, %ymm7
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm6 & ymm17)
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm31, %ymm0, %ymm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm31 ^ (ymm8 & (ymm0 ^ ymm31))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm8[u,u,2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm6
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[0,7,14],zero,zero,xmm6[3,10,u,u,u,u,u]
@@ -13360,11 +13360,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm8
; AVX512DQ-FCP-NEXT: vpmovsxdq {{.*#+}} ymm11 = [18446744073709551615,18446744073709551615,18446744073709551615,16777215]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm2, %ymm11, %ymm8
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm15, %ymm14, %ymm16
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm8 ^ (ymm11 & (ymm8 ^ ymm2))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm16 = ymm15 ^ (ymm14 & (ymm16 ^ ymm15))
; AVX512DQ-FCP-NEXT: vmovd {{.*#+}} xmm6 = [4,11,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm31, %ymm0, %ymm13
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm0, %ymm31, %ymm14
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm13 = ymm31 ^ (ymm13 & (ymm0 ^ ymm31))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm14 = ymm0 ^ (ymm14 & (ymm31 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm0, %xmm2
; AVX512DQ-FCP-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
@@ -13372,7 +13372,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm8, %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm3, %zmm8, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm8 & (zmm2 ^ zmm3))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm13[u,u,3,10],zero,zero,zero,xmm13[6,13],zero,zero,xmm13[u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm13, %xmm10
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[u,u],zero,zero,xmm10[1,8,15],zero,zero,xmm10[4,11,u,u,u,u,u]
@@ -13382,7 +13382,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm10, %xmm5
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm3, %ymm11, %ymm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm5 = ymm5 ^ (ymm11 & (ymm5 ^ ymm3))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm11, %ymm13
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm14, %xmm3
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u,u,u,u]
@@ -13394,17 +13394,17 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpor %xmm12, %xmm11, %xmm11
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %ymm3, %ymm13, %ymm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm11 = ymm11 ^ (ymm13 & (ymm11 ^ ymm3))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm15[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm0[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm5, %zmm3
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm9, %zmm8, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (zmm8 & (zmm3 ^ zmm9))
; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm15, %xmm5
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm6 = xmm0[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm11, %zmm5
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm7, %zmm8, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm8 & (zmm5 ^ zmm7))
; AVX512DQ-FCP-NEXT: movw $-512, %ax # imm = 0xFE00
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
; AVX512DQ-FCP-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm2 {%k1}
@@ -13418,7 +13418,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vextracti32x4 $1, %ymm19, %xmm1
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,xmm1[0,7,14],zero,zero,xmm1[3,10,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm6, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = (ymm1 & mem) | ymm4
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm23, %ymm0
; AVX512DQ-FCP-NEXT: vextracti32x4 $1, %ymm23, %xmm4
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
index 9d1939f66219f9..a01c3da43c3398 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -542,7 +542,7 @@ define void @store_i16_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX512-NEXT: vpermd %ymm2, %ymm4, %ymm4
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 ^ (mem & (ymm4 ^ ymm3))
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
@@ -569,7 +569,7 @@ define void @store_i16_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,4,1,5,1,5,2,6]
; AVX512-FCP-NEXT: vpermd %ymm0, %ymm3, %ymm0
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5],zero,zero,ymm0[2,3,6,7],zero,zero,ymm0[8,9,12,13],zero,zero,ymm0[18,19,22,23],zero,zero,ymm0[24,25,28,29],zero,zero,ymm0[26,27]
-; AVX512-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm2 & ~mem)
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm0
; AVX512-FCP-NEXT: vmovdqa %xmm1, 32(%rcx)
; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rcx)
@@ -589,7 +589,7 @@ define void @store_i16_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX512DQ-NEXT: vpermd %ymm2, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 ^ (mem & (ymm4 ^ ymm3))
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
@@ -616,7 +616,7 @@ define void @store_i16_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,4,1,5,1,5,2,6]
; AVX512DQ-FCP-NEXT: vpermd %ymm0, %ymm3, %ymm0
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5],zero,zero,ymm0[2,3,6,7],zero,zero,ymm0[8,9,12,13],zero,zero,ymm0[18,19,22,23],zero,zero,ymm0[24,25,28,29],zero,zero,ymm0[26,27]
-; AVX512DQ-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 | (ymm2 & ~mem)
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, 32(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rcx)
@@ -961,7 +961,7 @@ define void @store_i16_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm0[10,11],zero,zero,zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18,19],zero,zero,zero,zero
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm1 & mem)
; AVX512-NEXT: vprold $16, %xmm4, %xmm1
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
@@ -970,7 +970,7 @@ define void @store_i16_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm3 = [5,5,0,6,6,0,7,7]
; AVX512-NEXT: vpermd %ymm0, %ymm3, %ymm0
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512-NEXT: vmovdqa %ymm0, 64(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512-NEXT: vzeroupper
@@ -1000,7 +1000,7 @@ define void @store_i16_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm0[10,11],zero,zero,zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18,19],zero,zero,zero,zero
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm1 & mem)
; AVX512-FCP-NEXT: vprold $16, %xmm4, %xmm1
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
@@ -1009,7 +1009,7 @@ define void @store_i16_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [5,5,0,6,6,0,7,7]
; AVX512-FCP-NEXT: vpermd %ymm0, %ymm3, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512-FCP-NEXT: vmovdqa %ymm0, 64(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512-FCP-NEXT: vzeroupper
@@ -1039,7 +1039,7 @@ define void @store_i16_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm0[10,11],zero,zero,zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18,19],zero,zero,zero,zero
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm1 & mem)
; AVX512DQ-NEXT: vprold $16, %xmm4, %xmm1
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
@@ -1048,7 +1048,7 @@ define void @store_i16_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm3 = [5,5,0,6,6,0,7,7]
; AVX512DQ-NEXT: vpermd %ymm0, %ymm3, %ymm0
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512DQ-NEXT: vmovdqa %ymm0, 64(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512DQ-NEXT: vzeroupper
@@ -1078,7 +1078,7 @@ define void @store_i16_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm0[10,11],zero,zero,zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18,19],zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm1 & mem)
; AVX512DQ-FCP-NEXT: vprold $16, %xmm4, %xmm1
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[1,1,2,2]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
@@ -1087,7 +1087,7 @@ define void @store_i16_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [5,5,0,6,6,0,7,7]
; AVX512DQ-FCP-NEXT: vpermd %ymm0, %ymm3, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, 64(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512DQ-FCP-NEXT: vzeroupper
@@ -1682,7 +1682,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm4[4,5,6,7]
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm4 = [5,5,0,6,6,0,7,7,0,8,8,0,9,9,0,10]
; AVX512-NEXT: vpermd (%rdx), %zmm4, %zmm5
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm3))
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512-NEXT: vpshufb %ymm6, %ymm3, %ymm3
@@ -1707,7 +1707,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpermd %ymm8, %ymm4, %ymm4
; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm11, %zmm4
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm3 & mem)
; AVX512-NEXT: vmovdqa (%rdi), %ymm3
; AVX512-NEXT: vpshufb %ymm6, %ymm3, %ymm3
; AVX512-NEXT: vmovdqa (%rsi), %ymm6
@@ -1727,7 +1727,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-NEXT: vpternlogq $248, %zmm3, %zmm0, %zmm1
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm0 & zmm3)
; AVX512-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512-NEXT: vmovdqa64 %zmm4, 128(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm5, 64(%rcx)
@@ -1760,7 +1760,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm4[4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [5,5,0,6,6,0,7,7,0,8,8,0,9,9,0,10]
; AVX512-FCP-NEXT: vpermd (%rdx), %zmm4, %zmm5
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm3))
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
@@ -1785,7 +1785,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpermd %ymm8, %ymm4, %ymm4
; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm11, %zmm4
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm3 & mem)
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm3
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm6
@@ -1805,7 +1805,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm3, %zmm0, %zmm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm0 & zmm3)
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 128(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, 64(%rcx)
@@ -1838,7 +1838,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm4[4,5,6,7]
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm4 = [5,5,0,6,6,0,7,7,0,8,8,0,9,9,0,10]
; AVX512DQ-NEXT: vpermd (%rdx), %zmm4, %zmm5
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm3))
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512DQ-NEXT: vpshufb %ymm6, %ymm3, %ymm3
@@ -1863,7 +1863,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpermd %ymm8, %ymm4, %ymm4
; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm11, %zmm4
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm3 & mem)
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm3
; AVX512DQ-NEXT: vpshufb %ymm6, %ymm3, %ymm3
; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm6
@@ -1883,7 +1883,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-NEXT: vpternlogq $248, %zmm3, %zmm0, %zmm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm0 & zmm3)
; AVX512DQ-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm4, 128(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm5, 64(%rcx)
@@ -1916,7 +1916,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm4[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [5,5,0,6,6,0,7,7,0,8,8,0,9,9,0,10]
; AVX512DQ-FCP-NEXT: vpermd (%rdx), %zmm4, %zmm5
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm3))
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm3
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
@@ -1941,7 +1941,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm4, %ymm4
; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm11, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm3 & mem)
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm3
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm3, %ymm3
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm6
@@ -1961,7 +1961,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm3, %zmm0, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm0 & zmm3)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 128(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, 64(%rcx)
@@ -3130,7 +3130,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm15 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-NEXT: vpandn %ymm3, %ymm15, %ymm3
; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm3, %zmm3
-; AVX512-NEXT: vpternlogq $248, %zmm15, %zmm10, %zmm3
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 | (zmm10 & zmm15)
; AVX512-NEXT: vmovdqa 96(%rsi), %xmm10
; AVX512-NEXT: vprold $16, %xmm10, %xmm11
; AVX512-NEXT: vmovdqa 96(%rdi), %xmm12
@@ -3153,7 +3153,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm18 = [5,5,0,6,6,0,7,7,0,8,8,0,9,9,0,10]
; AVX512-NEXT: vpermd 64(%rdx), %zmm18, %zmm10
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm22 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512-NEXT: vpternlogd $184, %zmm0, %zmm22, %zmm10
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm10 = zmm10 ^ (zmm22 & (zmm10 ^ zmm0))
; AVX512-NEXT: vmovdqa 96(%rdi), %ymm0
; AVX512-NEXT: vmovdqa %ymm6, %ymm2
; AVX512-NEXT: vpshufb %ymm6, %ymm0, %ymm0
@@ -3177,7 +3177,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm5, %zmm17
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm19 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512-NEXT: vpternlogq $248, %zmm19, %zmm0, %zmm17
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm17 = zmm17 | (zmm0 & zmm19)
; AVX512-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa 64(%rsi), %ymm7
@@ -3197,7 +3197,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpermd %ymm14, %ymm16, %ymm6
; AVX512-NEXT: vpandn %ymm6, %ymm15, %ymm6
; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; AVX512-NEXT: vpternlogq $248, %zmm15, %zmm0, %zmm5
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 | (zmm0 & zmm15)
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX512-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa 32(%rsi), %ymm6
@@ -3217,7 +3217,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vpandnq %ymm6, %ymm22, %ymm6
; AVX512-NEXT: vpshufb %ymm9, %ymm8, %ymm7
; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512-NEXT: vpternlogq $248, %zmm19, %zmm0, %zmm6
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 | (zmm0 & zmm19)
; AVX512-NEXT: vmovdqa64 %xmm24, %xmm2
; AVX512-NEXT: vprold $16, %xmm24, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm4[1,1,2,2]
@@ -3236,7 +3236,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
; AVX512-NEXT: vpermd (%rdx), %zmm18, %zmm1
-; AVX512-NEXT: vpternlogd $184, %zmm0, %zmm22, %zmm1
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm22 & (zmm1 ^ zmm0))
; AVX512-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm6, 128(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm5, 192(%rcx)
@@ -3282,7 +3282,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vpandn %ymm3, %ymm15, %ymm3
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm3, %zmm3
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm15, %zmm10, %zmm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 | (zmm10 & zmm15)
; AVX512-FCP-NEXT: vmovdqa 96(%rsi), %xmm10
; AVX512-FCP-NEXT: vprold $16, %xmm10, %xmm11
; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %xmm12
@@ -3305,7 +3305,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [5,5,0,6,6,0,7,7,0,8,8,0,9,9,0,10]
; AVX512-FCP-NEXT: vpermd 64(%rdx), %zmm18, %zmm10
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm22 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm0, %zmm22, %zmm10
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm10 = zmm10 ^ (zmm22 & (zmm10 ^ zmm0))
; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqa %ymm6, %ymm2
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
@@ -3329,7 +3329,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm5, %zmm17
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm19, %zmm0, %zmm17
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm17 = zmm17 | (zmm0 & zmm19)
; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX512-FCP-NEXT: vmovdqa 64(%rsi), %ymm7
@@ -3349,7 +3349,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpermd %ymm14, %ymm16, %ymm6
; AVX512-FCP-NEXT: vpandn %ymm6, %ymm15, %ymm6
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm15, %zmm0, %zmm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 | (zmm0 & zmm15)
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm6
@@ -3369,7 +3369,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vpandnq %ymm6, %ymm22, %ymm6
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm8, %ymm7
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm19, %zmm0, %zmm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 | (zmm0 & zmm19)
; AVX512-FCP-NEXT: vmovdqa64 %xmm24, %xmm2
; AVX512-FCP-NEXT: vprold $16, %xmm24, %xmm0
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm4[1,1,2,2]
@@ -3388,7 +3388,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
; AVX512-FCP-NEXT: vpermd (%rdx), %zmm18, %zmm1
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm0, %zmm22, %zmm1
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm22 & (zmm1 ^ zmm0))
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 128(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, 192(%rcx)
@@ -3434,7 +3434,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm15 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vpandn %ymm3, %ymm15, %ymm3
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm3, %zmm3
-; AVX512DQ-NEXT: vpternlogq $248, %zmm15, %zmm10, %zmm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 | (zmm10 & zmm15)
; AVX512DQ-NEXT: vmovdqa 96(%rsi), %xmm10
; AVX512DQ-NEXT: vprold $16, %xmm10, %xmm11
; AVX512DQ-NEXT: vmovdqa 96(%rdi), %xmm12
@@ -3457,7 +3457,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm18 = [5,5,0,6,6,0,7,7,0,8,8,0,9,9,0,10]
; AVX512DQ-NEXT: vpermd 64(%rdx), %zmm18, %zmm10
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm22 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512DQ-NEXT: vpternlogd $184, %zmm0, %zmm22, %zmm10
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm10 = zmm10 ^ (zmm22 & (zmm10 ^ zmm0))
; AVX512DQ-NEXT: vmovdqa 96(%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm6, %ymm2
; AVX512DQ-NEXT: vpshufb %ymm6, %ymm0, %ymm0
@@ -3481,7 +3481,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm5, %zmm17
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm19 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512DQ-NEXT: vpternlogq $248, %zmm19, %zmm0, %zmm17
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm17 = zmm17 | (zmm0 & zmm19)
; AVX512DQ-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512DQ-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa 64(%rsi), %ymm7
@@ -3501,7 +3501,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpermd %ymm14, %ymm16, %ymm6
; AVX512DQ-NEXT: vpandn %ymm6, %ymm15, %ymm6
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; AVX512DQ-NEXT: vpternlogq $248, %zmm15, %zmm0, %zmm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 | (zmm0 & zmm15)
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX512DQ-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm6
@@ -3521,7 +3521,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vpandnq %ymm6, %ymm22, %ymm6
; AVX512DQ-NEXT: vpshufb %ymm9, %ymm8, %ymm7
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512DQ-NEXT: vpternlogq $248, %zmm19, %zmm0, %zmm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 | (zmm0 & zmm19)
; AVX512DQ-NEXT: vmovdqa64 %xmm24, %xmm2
; AVX512DQ-NEXT: vprold $16, %xmm24, %xmm0
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm4[1,1,2,2]
@@ -3540,7 +3540,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
; AVX512DQ-NEXT: vpermd (%rdx), %zmm18, %zmm1
-; AVX512DQ-NEXT: vpternlogd $184, %zmm0, %zmm22, %zmm1
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm22 & (zmm1 ^ zmm0))
; AVX512DQ-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm6, 128(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm5, 192(%rcx)
@@ -3586,7 +3586,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vpandn %ymm3, %ymm15, %ymm3
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm3, %zmm3
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm15, %zmm10, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 | (zmm10 & zmm15)
; AVX512DQ-FCP-NEXT: vmovdqa 96(%rsi), %xmm10
; AVX512DQ-FCP-NEXT: vprold $16, %xmm10, %xmm11
; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %xmm12
@@ -3609,7 +3609,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [5,5,0,6,6,0,7,7,0,8,8,0,9,9,0,10]
; AVX512DQ-FCP-NEXT: vpermd 64(%rdx), %zmm18, %zmm10
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm22 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm0, %zmm22, %zmm10
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm10 = zmm10 ^ (zmm22 & (zmm10 ^ zmm0))
; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa %ymm6, %ymm2
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
@@ -3633,7 +3633,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm5, %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm19 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm19, %zmm0, %zmm17
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm17 = zmm17 | (zmm0 & zmm19)
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa 64(%rsi), %ymm7
@@ -3653,7 +3653,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpermd %ymm14, %ymm16, %ymm6
; AVX512DQ-FCP-NEXT: vpandn %ymm6, %ymm15, %ymm6
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm15, %zmm0, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 | (zmm0 & zmm15)
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm6
@@ -3673,7 +3673,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vpandnq %ymm6, %ymm22, %ymm6
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm8, %ymm7
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm19, %zmm0, %zmm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 | (zmm0 & zmm19)
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm24, %xmm2
; AVX512DQ-FCP-NEXT: vprold $16, %xmm24, %xmm0
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm4[1,1,2,2]
@@ -3692,7 +3692,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpermd (%rdx), %zmm18, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm0, %zmm22, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm22 & (zmm1 ^ zmm0))
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 64(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 128(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, 192(%rcx)
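For reference, each `vpternlog*` immediate is an 8-bit truth table indexed by the bit triple (dst, src1, src2), and the regenerated comments simply spell that table out as a boolean expression with the destination listed first. Below is a minimal, illustrative C++ sketch of the decoding (not LLVM's actual comment printer); it checks the two immediates that appear most often in the hunks above, 248 and 184, against the expressions the new checks show.

```cpp
#include <cassert>
#include <cstdint>

// Evaluate VPTERNLOG element-wise: for each bit position, the triple
// (a, b, c) selects bit ((a << 2) | (b << 1) | c) of the 8-bit immediate.
// Here a = dst, b = src1, c = src2, matching the operand order the
// regenerated comments use (destination first in the expression).
static uint64_t ternlog(uint8_t imm, uint64_t a, uint64_t b, uint64_t c) {
  uint64_t r = 0;
  for (int bit = 0; bit < 64; ++bit) {
    unsigned idx = (((a >> bit) & 1) << 2) | (((b >> bit) & 1) << 1) |
                   ((c >> bit) & 1);
    r |= (uint64_t)((imm >> idx) & 1) << bit;
  }
  return r;
}

int main() {
  uint64_t a = 0x00ff00ff00ff00ffULL; // dst
  uint64_t b = 0x0f0f0f0f0f0f0f0fULL; // src1
  uint64_t c = 0x3333333333333333ULL; // src2
  // imm 248: "dst = dst | (src1 & src2)", as in the updated checks above.
  assert(ternlog(248, a, b, c) == (a | (b & c)));
  // imm 184: "dst = dst ^ (src1 & (dst ^ src2))", i.e. select src2 where
  // src1 is set and dst elsewhere.
  assert(ternlog(184, a, b, c) == (a ^ (b & (a ^ c))));
  return 0;
}
```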
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 06d390f053c7ee..2936b55ef6ed4d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -717,14 +717,14 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: shrq $48, %rax
; AVX512-NEXT: vmovd %eax, %xmm1
; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm0
; AVX512-NEXT: vmovq %xmm1, 32(%r9)
; AVX512-NEXT: vmovdqa %ymm0, (%r9)
@@ -748,14 +748,14 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-FCP-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,1,1]
; AVX512-FCP-NEXT: vpermd %ymm3, %ymm4, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-FCP-NEXT: shrq $48, %rax
; AVX512-FCP-NEXT: vmovd %eax, %xmm1
; AVX512-FCP-NEXT: vpbroadcastw %xmm1, %xmm1
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm0
; AVX512-FCP-NEXT: vmovq %xmm1, 32(%r9)
; AVX512-FCP-NEXT: vmovdqa %ymm0, (%r9)
@@ -779,14 +779,14 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512DQ-NEXT: shrq $48, %rax
; AVX512DQ-NEXT: vmovd %eax, %xmm1
; AVX512DQ-NEXT: vpbroadcastw %xmm1, %xmm1
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm0
; AVX512DQ-NEXT: vmovq %xmm1, 32(%r9)
; AVX512DQ-NEXT: vmovdqa %ymm0, (%r9)
@@ -810,14 +810,14 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,0,0,1,1]
; AVX512DQ-FCP-NEXT: vpermd %ymm3, %ymm4, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm2))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512DQ-FCP-NEXT: shrq $48, %rax
; AVX512DQ-FCP-NEXT: vmovd %eax, %xmm1
; AVX512DQ-FCP-NEXT: vpbroadcastw %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm0
; AVX512DQ-FCP-NEXT: vmovq %xmm1, 32(%r9)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%r9)
@@ -1330,21 +1330,21 @@ define void @store_i8_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1]
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u],zero,ymm8[7,u,u,u],zero,ymm8[8,u,u,u],zero,ymm8[9,u,u,u,26],zero,ymm8[u,u,u,27],zero,ymm8[u,u,u,28],zero,ymm8[u,u]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255]
-; AVX512-NEXT: vpternlogq $50, %ymm7, %ymm9, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ~ymm9 & (ymm8 | ymm7)
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm5[0,2,0,2]
; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm7[0,8,u],zero,zero,ymm7[1,9,u],zero,zero,ymm7[2,10,u],zero,zero,ymm7[19,27,u],zero,zero,ymm7[20,28,u],zero,zero,ymm7[21,29,u],zero,zero
; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[6],zero,ymm5[u,u,u,7],zero,ymm5[u,u,u,8],zero,ymm5[u,u,u,9,25,u,u,u],zero,ymm5[26,u,u,u],zero,ymm5[27,u,u,u],zero,ymm5[28]
; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,0,1]
; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[6,u,u,u],zero,ymm5[7,u,u,u],zero,ymm5[8,u,u,u],zero,zero,ymm5[u,u,u,26],zero,ymm5[u,u,u,27],zero,ymm5[u,u,u,28],zero
-; AVX512-NEXT: vpternlogq $200, %ymm8, %ymm9, %ymm5
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm5 = ymm9 & (ymm5 | ymm8)
; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,0]
; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[0,8],zero,zero,ymm6[u,1,9],zero,zero,ymm6[u,2,10],zero,zero,ymm6[u,3,19],zero,zero,ymm6[u,28,20],zero,zero,ymm6[u,29,21],zero,zero,ymm6[u,30,22]
; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
; AVX512-NEXT: vporq %zmm7, %zmm5, %zmm5
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512-NEXT: vpermd %zmm1, %zmm6, %zmm6
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u],zero,zero,xmm3[10,11,u],zero,zero,xmm3[12,13,u],zero,zero,xmm3[14,15,u]
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
@@ -1383,7 +1383,7 @@ define void @store_i8_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm6
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512-FCP-NEXT: vpermd %zmm6, %zmm7, %zmm6
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u],zero,zero,xmm3[10,11,u],zero,zero,xmm3[12,13,u],zero,zero,xmm3[14,15,u]
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
@@ -1410,21 +1410,21 @@ define void @store_i8_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u],zero,ymm8[7,u,u,u],zero,ymm8[8,u,u,u],zero,ymm8[9,u,u,u,26],zero,ymm8[u,u,u,27],zero,ymm8[u,u,u,28],zero,ymm8[u,u]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255]
-; AVX512DQ-NEXT: vpternlogq $50, %ymm7, %ymm9, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ~ymm9 & (ymm8 | ymm7)
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm5[0,2,0,2]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm7[0,8,u],zero,zero,ymm7[1,9,u],zero,zero,ymm7[2,10,u],zero,zero,ymm7[19,27,u],zero,zero,ymm7[20,28,u],zero,zero,ymm7[21,29,u],zero,zero
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[6],zero,ymm5[u,u,u,7],zero,ymm5[u,u,u,8],zero,ymm5[u,u,u,9,25,u,u,u],zero,ymm5[26,u,u,u],zero,ymm5[27,u,u,u],zero,ymm5[28]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,0,1]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[6,u,u,u],zero,ymm5[7,u,u,u],zero,ymm5[8,u,u,u],zero,zero,ymm5[u,u,u,26],zero,ymm5[u,u,u,27],zero,ymm5[u,u,u,28],zero
-; AVX512DQ-NEXT: vpternlogq $200, %ymm8, %ymm9, %ymm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm5 = ymm9 & (ymm5 | ymm8)
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,0]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[0,8],zero,zero,ymm6[u,1,9],zero,zero,ymm6[u,2,10],zero,zero,ymm6[u,3,19],zero,zero,ymm6[u,28,20],zero,zero,ymm6[u,29,21],zero,zero,ymm6[u,30,22]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5
; AVX512DQ-NEXT: vporq %zmm7, %zmm5, %zmm5
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm6 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512DQ-NEXT: vpermd %zmm1, %zmm6, %zmm6
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u],zero,zero,xmm3[10,11,u],zero,zero,xmm3[12,13,u],zero,zero,xmm3[14,15,u]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
@@ -1463,7 +1463,7 @@ define void @store_i8_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm6
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512DQ-FCP-NEXT: vpermd %zmm6, %zmm7, %zmm6
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u],zero,zero,xmm3[10,11,u],zero,zero,xmm3[12,13,u],zero,zero,xmm3[14,15,u]
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
@@ -1513,7 +1513,7 @@ define void @store_i8_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[10,11],zero,zero,zero,xmm0[12,13],zero,zero,zero,xmm0[14,15],zero,zero,zero
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[12],zero,zero,zero,zero,xmm4[13],zero,zero,zero,zero,xmm4[14],zero,zero,zero,zero,xmm4[15]
-; AVX512BW-NEXT: vpternlogq $254, %xmm2, %xmm0, %xmm1
+; AVX512BW-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 | xmm0 | xmm2
; AVX512BW-NEXT: vmovdqa %xmm1, 64(%r9)
; AVX512BW-NEXT: vmovdqa64 %zmm5, (%r9)
; AVX512BW-NEXT: vzeroupper
@@ -1550,7 +1550,7 @@ define void @store_i8_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[10,11],zero,zero,zero,xmm0[12,13],zero,zero,zero,xmm0[14,15],zero,zero,zero
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[12],zero,zero,zero,zero,xmm4[13],zero,zero,zero,zero,xmm4[14],zero,zero,zero,zero,xmm4[15]
-; AVX512BW-FCP-NEXT: vpternlogq $254, %xmm2, %xmm0, %xmm1
+; AVX512BW-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 | xmm0 | xmm2
; AVX512BW-FCP-NEXT: vmovdqa %xmm1, 64(%r9)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, (%r9)
; AVX512BW-FCP-NEXT: vzeroupper
@@ -1592,7 +1592,7 @@ define void @store_i8_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[10,11],zero,zero,zero,xmm0[12,13],zero,zero,zero,xmm0[14,15],zero,zero,zero
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[12],zero,zero,zero,zero,xmm4[13],zero,zero,zero,zero,xmm4[14],zero,zero,zero,zero,xmm4[15]
-; AVX512DQ-BW-NEXT: vpternlogq $254, %xmm2, %xmm0, %xmm1
+; AVX512DQ-BW-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 | xmm0 | xmm2
; AVX512DQ-BW-NEXT: vmovdqa %xmm1, 64(%r9)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, (%r9)
; AVX512DQ-BW-NEXT: vzeroupper
@@ -1629,7 +1629,7 @@ define void @store_i8_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[10,11],zero,zero,zero,xmm0[12,13],zero,zero,zero,xmm0[14,15],zero,zero,zero
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[12],zero,zero,zero,zero,xmm4[13],zero,zero,zero,zero,xmm4[14],zero,zero,zero,zero,xmm4[15]
-; AVX512DQ-BW-FCP-NEXT: vpternlogq $254, %xmm2, %xmm0, %xmm1
+; AVX512DQ-BW-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 | xmm0 | xmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm1, 64(%r9)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, (%r9)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
@@ -2438,7 +2438,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpor %xmm9, %xmm11, %xmm9
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255]
-; AVX512-NEXT: vpternlogq $226, %ymm6, %ymm11, %ymm9
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm6 ^ (ymm11 & (ymm9 ^ ymm6))
; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm6
; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
@@ -2447,19 +2447,19 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512-NEXT: vpternlogq $226, %ymm8, %ymm7, %ymm5
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm5 = ymm8 ^ (ymm7 & (ymm5 ^ ymm8))
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
; AVX512-NEXT: vmovdqa (%r8), %xmm6
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512-NEXT: vpermd %zmm6, %zmm8, %zmm6
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u]
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u]
; AVX512-NEXT: vpor %ymm5, %ymm8, %ymm5
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u,u],zero
; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u,u,19]
; AVX512-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512-NEXT: vpternlogq $226, %ymm5, %ymm11, %ymm8
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm8 = ymm5 ^ (ymm11 & (ymm8 ^ ymm5))
; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21,u],zero,ymm4[20],zero,ymm4[22],zero,ymm4[24,u],zero,ymm4[23],zero,ymm4[25,u]
; AVX512-NEXT: vpor %ymm5, %ymm9, %ymm5
@@ -2468,7 +2468,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
; AVX512-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 ^ (mem & (ymm9 ^ ymm5))
; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[12],zero,zero,zero,zero,ymm0[13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,ymm0[18],zero
@@ -2477,7 +2477,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
; AVX512-NEXT: vpandn %ymm9, %ymm10, %ymm9
; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm5 & mem)
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm4[26],zero,ymm4[28,u],zero,ymm4[u],zero,ymm4[29],zero,ymm4[31,u],zero,ymm4[30]
; AVX512-NEXT: vpor %ymm3, %ymm4, %ymm3
@@ -2486,10 +2486,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27,u],zero,ymm2[26],zero,ymm2[28],zero,ymm2[30,u],zero,ymm2[29],zero,ymm2[31,u]
; AVX512-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512-NEXT: vpternlogq $226, %ymm3, %ymm7, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm3 ^ (ymm7 & (ymm1 ^ ymm3))
; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512-NEXT: vmovdqa %ymm0, 128(%r9)
; AVX512-NEXT: vmovdqa64 %zmm8, 64(%r9)
; AVX512-NEXT: vmovdqa64 %zmm6, (%r9)
@@ -2515,7 +2515,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpor %xmm8, %xmm10, %xmm8
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm5 ^ (ymm10 & (ymm8 ^ ymm5))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm5
; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
@@ -2524,19 +2524,19 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm7, %ymm6, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm7 ^ (ymm6 & (ymm4 ^ ymm7))
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm4[0,1,2,3],zmm5[4,5,6,7]
; AVX512-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = mem[0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512-FCP-NEXT: vpermd %zmm4, %zmm7, %zmm7
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm5))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u]
; AVX512-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u,u],zero
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u,u,19]
; AVX512-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm5 ^ (ymm10 & (ymm8 ^ ymm5))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21,u],zero,ymm3[20],zero,ymm3[22],zero,ymm3[24,u],zero,ymm3[23],zero,ymm3[25,u]
; AVX512-FCP-NEXT: vpor %ymm5, %ymm9, %ymm5
@@ -2545,7 +2545,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
; AVX512-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 ^ (mem & (ymm9 ^ ymm5))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [4,0,5,5,5,5,0,6]
@@ -2554,7 +2554,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpandn %ymm8, %ymm9, %ymm8
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[12],zero,zero,zero,zero,ymm4[13],zero,zero,zero,zero,ymm4[14],zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,ymm4[18],zero
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm5 & mem)
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm3[26],zero,ymm3[28,u],zero,ymm3[u],zero,ymm3[29],zero,ymm3[31,u],zero,ymm3[30]
; AVX512-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
@@ -2563,10 +2563,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[27,u],zero,ymm1[26],zero,ymm1[28],zero,ymm1[30,u],zero,ymm1[29],zero,ymm1[31,u]
; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm2, %ymm6, %ymm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm2 ^ (ymm6 & (ymm0 ^ ymm2))
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [6,6,6,0,7,7,7,7]
; AVX512-FCP-NEXT: vpermd %ymm4, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (mem & (ymm1 ^ ymm0))
; AVX512-FCP-NEXT: vmovdqa %ymm1, 128(%r9)
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 64(%r9)
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%r9)
@@ -2593,7 +2593,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpor %xmm9, %xmm11, %xmm9
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm6, %ymm11, %ymm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm6 ^ (ymm11 & (ymm9 ^ ymm6))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm6
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
@@ -2602,19 +2602,19 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm8, %ymm7, %ymm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm5 = ymm8 ^ (ymm7 & (ymm5 ^ ymm8))
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa (%r8), %xmm6
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512DQ-NEXT: vpermd %zmm6, %zmm8, %zmm6
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm5))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u]
; AVX512DQ-NEXT: vpor %ymm5, %ymm8, %ymm5
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u,u],zero
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u,u,19]
; AVX512DQ-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512DQ-NEXT: vpternlogq $226, %ymm5, %ymm11, %ymm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm8 = ymm5 ^ (ymm11 & (ymm8 ^ ymm5))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21,u],zero,ymm4[20],zero,ymm4[22],zero,ymm4[24,u],zero,ymm4[23],zero,ymm4[25,u]
; AVX512DQ-NEXT: vpor %ymm5, %ymm9, %ymm5
@@ -2623,7 +2623,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25]
; AVX512DQ-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 ^ (mem & (ymm9 ^ ymm5))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[12],zero,zero,zero,zero,ymm0[13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,ymm0[18],zero
@@ -2632,7 +2632,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
; AVX512DQ-NEXT: vpandn %ymm9, %ymm10, %ymm9
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm5 & mem)
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm4[26],zero,ymm4[28,u],zero,ymm4[u],zero,ymm4[29],zero,ymm4[31,u],zero,ymm4[30]
; AVX512DQ-NEXT: vpor %ymm3, %ymm4, %ymm3
@@ -2641,10 +2641,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27,u],zero,ymm2[26],zero,ymm2[28],zero,ymm2[30,u],zero,ymm2[29],zero,ymm2[31,u]
; AVX512DQ-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm3, %ymm7, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm3 ^ (ymm7 & (ymm1 ^ ymm3))
; AVX512DQ-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm0 = ymm0 ^ (mem & (ymm0 ^ ymm1))
; AVX512DQ-NEXT: vmovdqa %ymm0, 128(%r9)
; AVX512DQ-NEXT: vmovdqa64 %zmm8, 64(%r9)
; AVX512DQ-NEXT: vmovdqa64 %zmm6, (%r9)
@@ -2670,7 +2670,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpor %xmm8, %xmm10, %xmm8
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm5 ^ (ymm10 & (ymm8 ^ ymm5))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm5
; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
@@ -2679,19 +2679,19 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm7, %ymm6, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm7 ^ (ymm6 & (ymm4 ^ ymm7))
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm4[0,1,2,3],zmm5[4,5,6,7]
; AVX512DQ-FCP-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = mem[0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512DQ-FCP-NEXT: vpermd %zmm4, %zmm7, %zmm7
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm5))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u]
; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm8, %ymm5
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u,u],zero
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u,u,19]
; AVX512DQ-FCP-NEXT: vpor %ymm8, %ymm9, %ymm8
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm8 = ymm5 ^ (ymm10 & (ymm8 ^ ymm5))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21,u],zero,ymm3[20],zero,ymm3[22],zero,ymm3[24,u],zero,ymm3[23],zero,ymm3[25,u]
; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm9, %ymm5
@@ -2700,7 +2700,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
; AVX512DQ-FCP-NEXT: vpor %ymm9, %ymm10, %ymm9
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 ^ (mem & (ymm9 ^ ymm5))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm5
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm8 = [4,0,5,5,5,5,0,6]
@@ -2709,7 +2709,7 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpandn %ymm8, %ymm9, %ymm8
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[12],zero,zero,zero,zero,ymm4[13],zero,zero,zero,zero,ymm4[14],zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,ymm4[18],zero
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm5 & mem)
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u],zero,ymm3[26],zero,ymm3[28,u],zero,ymm3[u],zero,ymm3[29],zero,ymm3[31,u],zero,ymm3[30]
; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm3, %ymm2
@@ -2718,10 +2718,10 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm1[27,u],zero,ymm1[26],zero,ymm1[28],zero,ymm1[30,u],zero,ymm1[29],zero,ymm1[31,u]
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm2, %ymm6, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm0 = ymm2 ^ (ymm6 & (ymm0 ^ ymm2))
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm1 = [6,6,6,0,7,7,7,7]
; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (mem & (ymm1 ^ ymm0))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, 128(%r9)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 64(%r9)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%r9)
@@ -4877,33 +4877,33 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm23[0,0,1,1]
; AVX512-NEXT: vinserti64x4 $1, %ymm22, %zmm7, %zmm7
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
-; AVX512-NEXT: vpternlogq $248, %zmm28, %zmm7, %zmm2
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm8 & (zmm7 ^ zmm5))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm7 & zmm28)
; AVX512-NEXT: vporq %zmm24, %zmm26, %zmm5
; AVX512-NEXT: vpermq {{.*#+}} zmm5 = zmm5[2,2,3,3,6,6,7,7]
; AVX512-NEXT: vporq %zmm25, %zmm27, %zmm7
; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,2,3,3,6,6,7,7]
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512-NEXT: vpternlogq $226, %zmm5, %zmm9, %zmm7
-; AVX512-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm30
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm9 & (zmm7 ^ zmm5))
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm30 = zmm30 ^ (zmm29 & (zmm30 ^ zmm7))
; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm17[2,2,3,3]
; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm16, %zmm5
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm19[2,2,3,3]
; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm18, %zmm7
-; AVX512-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm8 & (zmm7 ^ zmm5))
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
-; AVX512-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm1
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm14
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm31
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm9 & (zmm1 ^ zmm0))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 | (zmm7 & mem)
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm31 = zmm31 ^ (mem & (zmm31 ^ zmm1))
; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm4[0,0,1,1,4,4,5,5]
; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm3[0,0,1,1,4,4,5,5]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm0))
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512-NEXT: vpermd %zmm15, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
; AVX512-NEXT: vmovdqa64 %zmm14, 64(%r9)
; AVX512-NEXT: vmovdqa64 %zmm0, (%r9)
; AVX512-NEXT: vmovdqa64 %zmm31, 128(%r9)
@@ -5045,33 +5045,33 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vporq %zmm24, %zmm26, %zmm5
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm5[2,2,3,3,6,6,7,7]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm3, %zmm6, %zmm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm3 ^ (zmm6 & (zmm5 ^ zmm3))
; AVX512-FCP-NEXT: vpermt2d %zmm28, %zmm12, %zmm4
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm5, %zmm30, %zmm4
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm4 = zmm4 ^ (zmm30 & (zmm4 ^ zmm5))
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 256(%r9)
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm18[0,0,1,1]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm3, %zmm3
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm20[0,0,1,1]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm4, %zmm4
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm3, %zmm5, %zmm4
-; AVX512-FCP-NEXT: vpternlogq $248, %zmm11, %zmm4, %zmm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm3 ^ (zmm5 & (zmm4 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm4 & zmm11)
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm25[2,2,3,3]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm23, %zmm3
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm16[2,2,3,3]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm27, %zmm4
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm3, %zmm5, %zmm4
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm0, %zmm6, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm3 ^ (zmm5 & (zmm4 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm6 & (zmm1 ^ zmm0))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm4 & mem)
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm9[0,0,1,1,4,4,5,5]
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm13[0,0,1,1,4,4,5,5]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm0))
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512-FCP-NEXT: vpermd %zmm10, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm3))
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
; AVX512-FCP-NEXT: vpermd %zmm28, %zmm3, %zmm3
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm3
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm1))
; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 128(%r9)
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 64(%r9)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%r9)
@@ -5220,33 +5220,33 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm23[0,0,1,1]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm22, %zmm7, %zmm7
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
-; AVX512DQ-NEXT: vpternlogq $248, %zmm28, %zmm7, %zmm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm8 & (zmm7 ^ zmm5))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm7 & zmm28)
; AVX512DQ-NEXT: vporq %zmm24, %zmm26, %zmm5
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm5 = zmm5[2,2,3,3,6,6,7,7]
; AVX512DQ-NEXT: vporq %zmm25, %zmm27, %zmm7
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm7[2,2,3,3,6,6,7,7]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm5, %zmm9, %zmm7
-; AVX512DQ-NEXT: vpternlogd $184, %zmm7, %zmm29, %zmm30
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm9 & (zmm7 ^ zmm5))
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm30 = zmm30 ^ (zmm29 & (zmm30 ^ zmm7))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm17[2,2,3,3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm16, %zmm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm19[2,2,3,3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm18, %zmm7
-; AVX512DQ-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm5 ^ (zmm8 & (zmm7 ^ zmm5))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm0, %zmm0
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm1, %zmm1
-; AVX512DQ-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm1
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm14
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm31
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm9 & (zmm1 ^ zmm0))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm14 = zmm14 | (zmm7 & mem)
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm31 = zmm31 ^ (mem & (zmm31 ^ zmm1))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm4[0,0,1,1,4,4,5,5]
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm3[0,0,1,1,4,4,5,5]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm0))
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512DQ-NEXT: vpermd %zmm15, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm1))
; AVX512DQ-NEXT: vmovdqa64 %zmm14, 64(%r9)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%r9)
; AVX512DQ-NEXT: vmovdqa64 %zmm31, 128(%r9)
@@ -5388,33 +5388,33 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vporq %zmm24, %zmm26, %zmm5
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm5[2,2,3,3,6,6,7,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm3, %zmm6, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm3 ^ (zmm6 & (zmm5 ^ zmm3))
; AVX512DQ-FCP-NEXT: vpermt2d %zmm28, %zmm12, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm5, %zmm30, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm4 = zmm4 ^ (zmm30 & (zmm4 ^ zmm5))
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 256(%r9)
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm18[0,0,1,1]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm17, %zmm3, %zmm3
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm20[0,0,1,1]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm19, %zmm4, %zmm4
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm3, %zmm5, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq $248, %zmm11, %zmm4, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm3 ^ (zmm5 & (zmm4 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm4 & zmm11)
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm25[2,2,3,3]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm23, %zmm3
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm16[2,2,3,3]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm27, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm3, %zmm5, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm6, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm3 ^ (zmm5 & (zmm4 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm6 & (zmm1 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm4 & mem)
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm9[0,0,1,1,4,4,5,5]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm13[0,0,1,1,4,4,5,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,9,9,0,10,10,10,10,0]
; AVX512DQ-FCP-NEXT: vpermd %zmm10, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm3))
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [6,6,6,0,7,7,7,7,0,8,8,8,8,0,9,9]
; AVX512DQ-FCP-NEXT: vpermd %zmm28, %zmm3, %zmm3
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm1))
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 128(%r9)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 64(%r9)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%r9)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 60af864597f4f7..7beba6e30cf4c7 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -807,7 +807,7 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 ^ (mem & (ymm4 ^ ymm3))
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,u,6,14],zero,zero,xmm0[u,u,7,15],zero,zero,xmm0[u,u]
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,13,u,u],zero,zero,xmm1[6,14,u,u],zero,zero,xmm1[7,15,u,u]
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -839,7 +839,7 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 ^ (mem & (ymm4 ^ ymm3))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,u,6,14],zero,zero,xmm0[u,u,7,15],zero,zero,xmm0[u,u]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,13,u,u],zero,zero,xmm1[6,14,u,u],zero,zero,xmm1[7,15,u,u]
; AVX512-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -871,7 +871,7 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 ^ (mem & (ymm4 ^ ymm3))
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,u,6,14],zero,zero,xmm0[u,u,7,15],zero,zero,xmm0[u,u]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,13,u,u],zero,zero,xmm1[6,14,u,u],zero,zero,xmm1[7,15,u,u]
; AVX512DQ-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -903,7 +903,7 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm4 ^ (mem & (ymm4 ^ ymm3))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,u,6,14],zero,zero,xmm0[u,u,7,15],zero,zero,xmm0[u,u]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,13,u,u],zero,zero,xmm1[6,14,u,u],zero,zero,xmm1[7,15,u,u]
; AVX512DQ-FCP-NEXT: vpor %xmm0, %xmm1, %xmm0
@@ -1415,7 +1415,7 @@ define void @store_i8_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,ymm5[5,13],zero,zero,zero,zero,ymm5[6,14],zero,zero,zero,zero,ymm5[7,15],zero,zero,zero,zero,ymm5[16,24],zero,zero,zero,zero,ymm5[17,25],zero,zero,zero,zero
; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm3 & mem)
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u],zero,zero,ymm1[3,11,u,u],zero,zero,ymm1[4,12,u,u],zero,zero,ymm1[21,29,u,u],zero,zero,ymm1[22,30,u,u],zero,zero,ymm1[23,31,u,u]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
@@ -1423,7 +1423,7 @@ define void @store_i8_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,10],zero,zero,zero,zero,ymm1[3,11],zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,ymm1[21,29],zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,ymm1[23,31]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm0 & mem)
; AVX512-NEXT: vmovdqa %ymm1, 64(%rax)
; AVX512-NEXT: vmovdqa64 %zmm4, (%rax)
; AVX512-NEXT: vzeroupper
@@ -1454,7 +1454,7 @@ define void @store_i8_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,ymm5[5,13],zero,zero,zero,zero,ymm5[6,14],zero,zero,zero,zero,ymm5[7,15],zero,zero,zero,zero,ymm5[16,24],zero,zero,zero,zero,ymm5[17,25],zero,zero,zero,zero
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm3 & mem)
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u],zero,zero,ymm1[3,11,u,u],zero,zero,ymm1[4,12,u,u],zero,zero,ymm1[21,29,u,u],zero,zero,ymm1[22,30,u,u],zero,zero,ymm1[23,31,u,u]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
@@ -1462,7 +1462,7 @@ define void @store_i8_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,10],zero,zero,zero,zero,ymm1[3,11],zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,ymm1[21,29],zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,ymm1[23,31]
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm0 & mem)
; AVX512-FCP-NEXT: vmovdqa %ymm1, 64(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, (%rax)
; AVX512-FCP-NEXT: vzeroupper
@@ -1493,7 +1493,7 @@ define void @store_i8_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,ymm5[5,13],zero,zero,zero,zero,ymm5[6,14],zero,zero,zero,zero,ymm5[7,15],zero,zero,zero,zero,ymm5[16,24],zero,zero,zero,zero,ymm5[17,25],zero,zero,zero,zero
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm3 & mem)
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u],zero,zero,ymm1[3,11,u,u],zero,zero,ymm1[4,12,u,u],zero,zero,ymm1[21,29,u,u],zero,zero,ymm1[22,30,u,u],zero,zero,ymm1[23,31,u,u]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
@@ -1501,7 +1501,7 @@ define void @store_i8_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,10],zero,zero,zero,zero,ymm1[3,11],zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,ymm1[21,29],zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,ymm1[23,31]
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm0 & mem)
; AVX512DQ-NEXT: vmovdqa %ymm1, 64(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm4, (%rax)
; AVX512DQ-NEXT: vzeroupper
@@ -1532,7 +1532,7 @@ define void @store_i8_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm2[0,2,1,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,ymm5[5,13],zero,zero,zero,zero,ymm5[6,14],zero,zero,zero,zero,ymm5[7,15],zero,zero,zero,zero,ymm5[16,24],zero,zero,zero,zero,ymm5[17,25],zero,zero,zero,zero
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 | (zmm3 & mem)
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u],zero,zero,ymm1[3,11,u,u],zero,zero,ymm1[4,12,u,u],zero,zero,ymm1[21,29,u,u],zero,zero,ymm1[22,30,u,u],zero,zero,ymm1[23,31,u,u]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
@@ -1540,7 +1540,7 @@ define void @store_i8_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[2,10],zero,zero,zero,zero,ymm1[3,11],zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,ymm1[21,29],zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,ymm1[23,31]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 | (ymm0 & mem)
; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, 64(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
@@ -2663,7 +2663,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm13
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512-NEXT: vpternlogd $226, %zmm6, %zmm14, %zmm13
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm13 = zmm6 ^ (zmm14 & (zmm13 ^ zmm6))
; AVX512-NEXT: vmovdqa (%r9), %xmm11
; AVX512-NEXT: vmovdqa (%r8), %xmm12
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
@@ -2674,7 +2674,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufhw {{.*#+}} ymm15 = ymm15[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
; AVX512-NEXT: vinserti64x4 $1, %ymm15, %zmm6, %zmm6
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm6
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm13))
; AVX512-NEXT: vpbroadcastq {{.*#+}} xmm13 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
; AVX512-NEXT: vpshufb %xmm13, %xmm9, %xmm15
; AVX512-NEXT: vpshufb %xmm13, %xmm10, %xmm13
@@ -2692,7 +2692,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,6,5]
; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm7[0,0,0,1,4,4,4,5]
-; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm8
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm8 = zmm13 ^ (mem & (zmm8 ^ zmm13))
; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
; AVX512-NEXT: vpshufb %xmm7, %xmm11, %xmm9
; AVX512-NEXT: vpshufb %xmm7, %xmm12, %xmm10
@@ -2702,7 +2702,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,4,4]
; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm10, %zmm9
; AVX512-NEXT: vpermq {{.*#+}} zmm9 = zmm9[0,0,0,1,4,4,4,5]
-; AVX512-NEXT: vpternlogd $184, %zmm8, %zmm14, %zmm9
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm9 = zmm9 ^ (zmm14 & (zmm9 ^ zmm8))
; AVX512-NEXT: vpshufb %ymm15, %ymm4, %ymm8
; AVX512-NEXT: vpshufb %ymm15, %ymm2, %ymm10
; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[4],ymm8[4],ymm10[5],ymm8[5],ymm10[6],ymm8[6],ymm10[7],ymm8[7],ymm10[16],ymm8[16],ymm10[17],ymm8[17],ymm10[18],ymm8[18],ymm10[19],ymm8[19],ymm10[20],ymm8[20],ymm10[21],ymm8[21],ymm10[22],ymm8[22],ymm10[23],ymm8[23]
@@ -2718,7 +2718,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,2,2,3,6,6,6,7]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm2
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm8))
; AVX512-NEXT: vpshufb %ymm7, %ymm1, %ymm3
; AVX512-NEXT: vpshufb %ymm7, %ymm0, %ymm4
; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
@@ -2726,7 +2726,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm2))
; AVX512-NEXT: vmovdqa64 %zmm0, 128(%rax)
; AVX512-NEXT: vmovdqa64 %zmm9, (%rax)
; AVX512-NEXT: vmovdqa64 %zmm6, 64(%rax)
@@ -2758,7 +2758,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm8
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm6))
; AVX512-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm6
; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm9
@@ -2767,7 +2767,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm6, %zmm6
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm6
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm8))
; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm9
; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm8
; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm11
@@ -2787,7 +2787,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vprold $16, %xmm14, %xmm14
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm14, %zmm13
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm14 = zmm13[0,0,0,1,4,4,4,5]
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm14
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm14 = zmm14 ^ (mem & (zmm14 ^ zmm12))
; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm12
; AVX512-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm15
; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm13
@@ -2798,7 +2798,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm10, %zmm15, %zmm10
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[0,0,0,1,4,4,4,5]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm14, %zmm15, %zmm10
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm10 = zmm10 ^ (zmm15 & (zmm10 ^ zmm14))
; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
; AVX512-FCP-NEXT: vprold $16, %ymm4, %ymm4
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
@@ -2810,13 +2810,13 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm9, %zmm3
-; AVX512-FCP-NEXT: vpternlogd $226, %zmm5, %zmm15, %zmm3
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm3 = zmm5 ^ (zmm15 & (zmm3 ^ zmm5))
; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,18,19,16,17,22,23,24,25,24,25,24,25,24,25]
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15]
; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm9, %zmm1
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm3))
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 64(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm10, (%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 128(%rax)
@@ -2852,7 +2852,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm13
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm14 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-NEXT: vpternlogd $226, %zmm6, %zmm14, %zmm13
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm13 = zmm6 ^ (zmm14 & (zmm13 ^ zmm6))
; AVX512DQ-NEXT: vmovdqa (%r9), %xmm11
; AVX512DQ-NEXT: vmovdqa (%r8), %xmm12
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
@@ -2863,7 +2863,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufhw {{.*#+}} ymm15 = ymm15[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm15, %zmm6, %zmm6
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm6
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm13))
; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} xmm13 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
; AVX512DQ-NEXT: vpshufb %xmm13, %xmm9, %xmm15
; AVX512DQ-NEXT: vpshufb %xmm13, %xmm10, %xmm13
@@ -2881,7 +2881,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,6,5]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm7[0,0,0,1,4,4,4,5]
-; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm8
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm8 = zmm13 ^ (mem & (zmm8 ^ zmm13))
; AVX512DQ-NEXT: vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
; AVX512DQ-NEXT: vpshufb %xmm7, %xmm11, %xmm9
; AVX512DQ-NEXT: vpshufb %xmm7, %xmm12, %xmm10
@@ -2891,7 +2891,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,4,4]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm10, %zmm9
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm9 = zmm9[0,0,0,1,4,4,4,5]
-; AVX512DQ-NEXT: vpternlogd $184, %zmm8, %zmm14, %zmm9
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm9 = zmm9 ^ (zmm14 & (zmm9 ^ zmm8))
; AVX512DQ-NEXT: vpshufb %ymm15, %ymm4, %ymm8
; AVX512DQ-NEXT: vpshufb %ymm15, %ymm2, %ymm10
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[4],ymm8[4],ymm10[5],ymm8[5],ymm10[6],ymm8[6],ymm10[7],ymm8[7],ymm10[16],ymm8[16],ymm10[17],ymm8[17],ymm10[18],ymm8[18],ymm10[19],ymm8[19],ymm10[20],ymm8[20],ymm10[21],ymm8[21],ymm10[22],ymm8[22],ymm10[23],ymm8[23]
@@ -2907,7 +2907,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm2 = zmm2[2,2,2,3,6,6,6,7]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm2
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm8))
; AVX512DQ-NEXT: vpshufb %ymm7, %ymm1, %ymm3
; AVX512DQ-NEXT: vpshufb %ymm7, %ymm0, %ymm4
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
@@ -2915,7 +2915,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm2))
; AVX512DQ-NEXT: vmovdqa64 %zmm0, 128(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm9, (%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm6, 64(%rax)
@@ -2947,7 +2947,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm8
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm6))
; AVX512DQ-FCP-NEXT: vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm1, %ymm6
; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm0, %ymm9
@@ -2956,7 +2956,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm6, %zmm6
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm6
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm8))
; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm9
; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm9, %xmm8
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm11
@@ -2976,7 +2976,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vprold $16, %xmm14, %xmm14
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm14, %zmm13
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm14 = zmm13[0,0,0,1,4,4,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm14
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm14 = zmm14 ^ (mem & (zmm14 ^ zmm12))
; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm12
; AVX512DQ-FCP-NEXT: vpshufb %xmm10, %xmm12, %xmm15
; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm13
@@ -2987,7 +2987,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm10, %zmm15, %zmm10
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[0,0,0,1,4,4,4,5]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm15 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm14, %zmm15, %zmm10
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm10 = zmm10 ^ (zmm15 & (zmm10 ^ zmm14))
; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
; AVX512DQ-FCP-NEXT: vprold $16, %ymm4, %ymm4
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
@@ -2999,13 +2999,13 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm9, %zmm3
-; AVX512DQ-FCP-NEXT: vpternlogd $226, %zmm5, %zmm15, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm3 = zmm5 ^ (zmm15 & (zmm3 ^ zmm5))
; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,18,19,16,17,22,23,24,25,24,25,24,25,24,25]
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm9, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm3))
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 64(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, (%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 128(%rax)
@@ -5613,61 +5613,61 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm2, %zmm2
; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm8
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512-NEXT: vpternlogq $226, %zmm2, %zmm9, %zmm8
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm8 = zmm2 ^ (zmm9 & (zmm8 ^ zmm2))
; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm2
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512-NEXT: vpternlogd $184, %zmm8, %zmm5, %zmm2
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm2 = zmm2 ^ (zmm5 & (zmm2 ^ zmm8))
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm14[2,2,2,3]
; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm13, %zmm7
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm15, %zmm1
-; AVX512-NEXT: vpternlogq $226, %zmm7, %zmm9, %zmm1
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm7 ^ (zmm9 & (zmm1 ^ zmm7))
; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm31, %zmm6
-; AVX512-NEXT: vpternlogd $184, %zmm1, %zmm5, %zmm6
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (zmm5 & (zmm6 ^ zmm1))
; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm12, %zmm1
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; AVX512-NEXT: vpternlogd $184, %zmm2, %zmm4, %zmm1
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm4 & (zmm1 ^ zmm2))
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm11, %zmm2
-; AVX512-NEXT: vpternlogd $184, %zmm6, %zmm4, %zmm2
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm2 = zmm2 ^ (zmm4 & (zmm2 ^ zmm6))
; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
; AVX512-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
; AVX512-NEXT: # zmm3 = mem[0,0,0,1,4,4,4,5]
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm3
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm4 & (zmm3 ^ zmm0))
; AVX512-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512-NEXT: # zmm0 = mem[0,0,0,1,4,4,4,5]
-; AVX512-NEXT: vpternlogq $184, %zmm3, %zmm9, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm9 & (zmm0 ^ zmm3))
; AVX512-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
; AVX512-NEXT: # zmm3 = mem[0,0,0,1,4,4,4,5]
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX512-NEXT: vpternlogd $184, %zmm0, %zmm5, %zmm3
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (zmm5 & (zmm3 ^ zmm0))
; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm30[0,0,0,1,4,4,4,5]
; AVX512-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
; AVX512-NEXT: # zmm6 = mem[0,0,0,1,4,4,4,5]
-; AVX512-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm6
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm0 ^ (zmm4 & (zmm6 ^ zmm0))
; AVX512-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512-NEXT: # zmm0 = mem[0,0,0,1,4,4,4,5]
-; AVX512-NEXT: vpternlogq $184, %zmm6, %zmm9, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm9 & (zmm0 ^ zmm6))
; AVX512-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
; AVX512-NEXT: # zmm6 = mem[0,0,0,1,4,4,4,5]
-; AVX512-NEXT: vpternlogd $184, %zmm0, %zmm5, %zmm6
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (zmm5 & (zmm6 ^ zmm0))
; AVX512-NEXT: vpermq $234, (%rsp), %zmm0 # 64-byte Folded Reload
; AVX512-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
; AVX512-NEXT: # zmm5 = mem[2,2,2,3,6,6,6,7]
-; AVX512-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm5
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm0 ^ (zmm9 & (zmm5 ^ zmm0))
; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm26[2,2,2,3,6,6,6,7]
; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm24[2,2,2,3,6,6,6,7]
-; AVX512-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm7
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm0 ^ (zmm9 & (zmm7 ^ zmm0))
; AVX512-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
-; AVX512-NEXT: vpternlogq $184, %zmm5, %zmm4, %zmm0
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm4 & (zmm0 ^ zmm5))
; AVX512-NEXT: vpermq {{.*#+}} zmm5 = zmm23[2,2,2,3,6,6,6,7]
-; AVX512-NEXT: vpternlogq $184, %zmm7, %zmm4, %zmm5
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm4 & (zmm5 ^ zmm7))
; AVX512-NEXT: vpermq {{.*#+}} zmm4 = zmm28[2,2,2,3,6,6,6,7]
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX512-NEXT: vpternlogd $184, %zmm0, %zmm7, %zmm4
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm4 = zmm4 ^ (zmm7 & (zmm4 ^ zmm0))
; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm25[2,2,2,3,6,6,6,7]
-; AVX512-NEXT: vpternlogd $184, %zmm5, %zmm7, %zmm0
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (zmm7 & (zmm0 ^ zmm5))
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: vmovdqa64 %zmm0, 128(%rax)
; AVX512-NEXT: vmovdqa64 %zmm4, 320(%rax)
@@ -5835,7 +5835,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm4, %zmm7
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm7, %zmm9, %zmm15
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm7 ^ (zmm9 & (zmm15 ^ zmm7))
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
; AVX512-FCP-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm0
@@ -5845,7 +5845,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
; AVX512-FCP-NEXT: vpermt2q %zmm0, %zmm4, %zmm1
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm15, %zmm10, %zmm1
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm10 & (zmm1 ^ zmm15))
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
; AVX512-FCP-NEXT: # ymm15 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
@@ -5854,7 +5854,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm8, %xmm0
; AVX512-FCP-NEXT: vpermt2q %zmm2, %zmm4, %zmm0
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm2 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm1, %zmm2, %zmm0
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (zmm2 & (zmm0 ^ zmm1))
; AVX512-FCP-NEXT: vmovdqa64 %ymm22, %ymm8
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm8[0],ymm1[1],ymm8[1],ymm1[2],ymm8[2],ymm1[3],ymm8[3],ymm1[4],ymm8[4],ymm1[5],ymm8[5],ymm1[6],ymm8[6],ymm1[7],ymm8[7],ymm1[16],ymm8[16],ymm1[17],ymm8[17],ymm1[18],ymm8[18],ymm1[19],ymm8[19],ymm1[20],ymm8[20],ymm1[21],ymm8[21],ymm1[22],ymm8[22],ymm1[23],ymm8[23]
@@ -5872,18 +5872,18 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpermt2q %zmm5, %zmm4, %zmm6
; AVX512-FCP-NEXT: vprold $16, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm3, %zmm9, %zmm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm3 ^ (zmm9 & (zmm6 ^ zmm3))
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512-FCP-NEXT: vmovdqa64 %ymm19, %ymm3
; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm13, %xmm3
; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm6, %zmm10, %zmm3
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (zmm10 & (zmm3 ^ zmm6))
; AVX512-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512-FCP-NEXT: vpshufb %ymm15, %ymm1, %ymm1
; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm14, %xmm5
; AVX512-FCP-NEXT: vpermt2q %zmm1, %zmm4, %zmm5
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm3, %zmm2, %zmm5
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm5 = zmm5 ^ (zmm2 & (zmm5 ^ zmm3))
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, 256(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
@@ -5891,39 +5891,39 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
; AVX512-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
; AVX512-FCP-NEXT: # zmm1 = mem[2,2,2,3,6,6,6,7]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm9 & (zmm1 ^ zmm0))
; AVX512-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512-FCP-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm2 & (zmm0 ^ zmm1))
; AVX512-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
; AVX512-FCP-NEXT: # zmm1 = mem[2,2,2,3,6,6,6,7]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm0, %zmm3, %zmm1
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm3 & (zmm1 ^ zmm0))
; AVX512-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512-FCP-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
; AVX512-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
; AVX512-FCP-NEXT: # zmm4 = mem[2,2,2,3,6,6,6,7]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm4
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm0 ^ (zmm9 & (zmm4 ^ zmm0))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm23[2,2,2,3,6,6,6,7]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm4, %zmm2, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm2 & (zmm0 ^ zmm4))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm24[2,2,2,3,6,6,6,7]
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm0, %zmm3, %zmm4
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm4 = zmm4 ^ (zmm3 & (zmm4 ^ zmm0))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm30[0,0,0,1,4,4,4,5]
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm26[0,0,0,1,4,4,4,5]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm0, %zmm2, %zmm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm2 & (zmm3 ^ zmm0))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm31[0,0,0,1,4,4,4,5]
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm25[0,0,0,1,4,4,4,5]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm0, %zmm2, %zmm5
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm0 ^ (zmm2 & (zmm5 ^ zmm0))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm29[0,0,0,1,4,4,4,5]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm3, %zmm9, %zmm0
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm9 & (zmm0 ^ zmm3))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm27[0,0,0,1,4,4,4,5]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm5, %zmm9, %zmm2
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm9 & (zmm2 ^ zmm5))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm21[0,0,0,1,4,4,4,5]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm0, %zmm5, %zmm3
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (zmm5 & (zmm3 ^ zmm0))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm28[0,0,0,1,4,4,4,5]
-; AVX512-FCP-NEXT: vpternlogd $184, %zmm2, %zmm5, %zmm0
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (zmm5 & (zmm0 ^ zmm2))
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 192(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
@@ -6142,61 +6142,61 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm2, %zmm2
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm8
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm2, %zmm9, %zmm8
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm8 = zmm2 ^ (zmm9 & (zmm8 ^ zmm2))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm7, %zmm2
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512DQ-NEXT: vpternlogd $184, %zmm8, %zmm5, %zmm2
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm2 = zmm2 ^ (zmm5 & (zmm2 ^ zmm8))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm14[2,2,2,3]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm13, %zmm7
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm15, %zmm1
-; AVX512DQ-NEXT: vpternlogq $226, %zmm7, %zmm9, %zmm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm7 ^ (zmm9 & (zmm1 ^ zmm7))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm31, %zmm6
-; AVX512DQ-NEXT: vpternlogd $184, %zmm1, %zmm5, %zmm6
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (zmm5 & (zmm6 ^ zmm1))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm12, %zmm1
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; AVX512DQ-NEXT: vpternlogd $184, %zmm2, %zmm4, %zmm1
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm4 & (zmm1 ^ zmm2))
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm11, %zmm2
-; AVX512DQ-NEXT: vpternlogd $184, %zmm6, %zmm4, %zmm2
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm2 = zmm2 ^ (zmm4 & (zmm2 ^ zmm6))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
; AVX512DQ-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm3 = mem[0,0,0,1,4,4,4,5]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm4 & (zmm3 ^ zmm0))
; AVX512DQ-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm0 = mem[0,0,0,1,4,4,4,5]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm3, %zmm9, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm9 & (zmm0 ^ zmm3))
; AVX512DQ-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm3 = mem[0,0,0,1,4,4,4,5]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX512DQ-NEXT: vpternlogd $184, %zmm0, %zmm5, %zmm3
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (zmm5 & (zmm3 ^ zmm0))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm30[0,0,0,1,4,4,4,5]
; AVX512DQ-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm6 = mem[0,0,0,1,4,4,4,5]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm0, %zmm4, %zmm6
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm0 ^ (zmm4 & (zmm6 ^ zmm0))
; AVX512DQ-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm0 = mem[0,0,0,1,4,4,4,5]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm6, %zmm9, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm9 & (zmm0 ^ zmm6))
; AVX512DQ-NEXT: vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm6 = mem[0,0,0,1,4,4,4,5]
-; AVX512DQ-NEXT: vpternlogd $184, %zmm0, %zmm5, %zmm6
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (zmm5 & (zmm6 ^ zmm0))
; AVX512DQ-NEXT: vpermq $234, (%rsp), %zmm0 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm5 = mem[2,2,2,3,6,6,6,7]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm0 ^ (zmm9 & (zmm5 ^ zmm0))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm26[2,2,2,3,6,6,6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm24[2,2,2,3,6,6,6,7]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm0 ^ (zmm9 & (zmm7 ^ zmm0))
; AVX512DQ-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm5, %zmm4, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm4 & (zmm0 ^ zmm5))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm5 = zmm23[2,2,2,3,6,6,6,7]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm7, %zmm4, %zmm5
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (zmm4 & (zmm5 ^ zmm7))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm4 = zmm28[2,2,2,3,6,6,6,7]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm7 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX512DQ-NEXT: vpternlogd $184, %zmm0, %zmm7, %zmm4
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm4 = zmm4 ^ (zmm7 & (zmm4 ^ zmm0))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm25[2,2,2,3,6,6,6,7]
-; AVX512DQ-NEXT: vpternlogd $184, %zmm5, %zmm7, %zmm0
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (zmm7 & (zmm0 ^ zmm5))
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-NEXT: vmovdqa64 %zmm0, 128(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm4, 320(%rax)
@@ -6364,7 +6364,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm4, %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm7, %zmm9, %zmm15
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm15 = zmm7 ^ (zmm9 & (zmm15 ^ zmm7))
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0]
; AVX512DQ-FCP-NEXT: # ymm2 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm0
@@ -6374,7 +6374,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vpermt2q %zmm0, %zmm4, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm15, %zmm10, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm10 & (zmm1 ^ zmm15))
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0,0,2,0,1,0,0,0,3,0,0,0,0,0,4,0,0]
; AVX512DQ-FCP-NEXT: # ymm15 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm20, %ymm0
@@ -6383,7 +6383,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm8, %xmm0
; AVX512DQ-FCP-NEXT: vpermt2q %zmm2, %zmm4, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm2 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm1, %zmm2, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (zmm2 & (zmm0 ^ zmm1))
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm22, %ymm8
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm8[0],ymm1[1],ymm8[1],ymm1[2],ymm8[2],ymm1[3],ymm8[3],ymm1[4],ymm8[4],ymm1[5],ymm8[5],ymm1[6],ymm8[6],ymm1[7],ymm8[7],ymm1[16],ymm8[16],ymm1[17],ymm8[17],ymm1[18],ymm8[18],ymm1[19],ymm8[19],ymm1[20],ymm8[20],ymm1[21],ymm8[21],ymm1[22],ymm8[22],ymm1[23],ymm8[23]
@@ -6401,18 +6401,18 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpermt2q %zmm5, %zmm4, %zmm6
; AVX512DQ-FCP-NEXT: vprold $16, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm3, %zmm9, %zmm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm3 ^ (zmm9 & (zmm6 ^ zmm3))
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm19, %ymm3
; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm13, %xmm3
; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm4, %zmm3
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm6, %zmm10, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (zmm10 & (zmm3 ^ zmm6))
; AVX512DQ-FCP-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX512DQ-FCP-NEXT: vpshufb %ymm15, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm14, %xmm5
; AVX512DQ-FCP-NEXT: vpermt2q %zmm1, %zmm4, %zmm5
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm3, %zmm2, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm5 = zmm5 ^ (zmm2 & (zmm5 ^ zmm3))
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, 256(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
@@ -6420,39 +6420,39 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
; AVX512DQ-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: # zmm1 = mem[2,2,2,3,6,6,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm0 ^ (zmm9 & (zmm1 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm2 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm1, %zmm2, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm2 & (zmm0 ^ zmm1))
; AVX512DQ-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: # zmm1 = mem[2,2,2,3,6,6,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm0, %zmm3, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (zmm3 & (zmm1 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: # zmm0 = mem[2,2,2,3,6,6,6,7]
; AVX512DQ-FCP-NEXT: vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: # zmm4 = mem[2,2,2,3,6,6,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm9, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm0 ^ (zmm9 & (zmm4 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm23[2,2,2,3,6,6,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm4, %zmm2, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm2 & (zmm0 ^ zmm4))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm24[2,2,2,3,6,6,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm0, %zmm3, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm4 = zmm4 ^ (zmm3 & (zmm4 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm30[0,0,0,1,4,4,4,5]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm26[0,0,0,1,4,4,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm2, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm3 = zmm0 ^ (zmm2 & (zmm3 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm31[0,0,0,1,4,4,4,5]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm25[0,0,0,1,4,4,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm0, %zmm2, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm0 ^ (zmm2 & (zmm5 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm29[0,0,0,1,4,4,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm3, %zmm9, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (zmm9 & (zmm0 ^ zmm3))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm27[0,0,0,1,4,4,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm5, %zmm9, %zmm2
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (zmm9 & (zmm2 ^ zmm5))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm21[0,0,0,1,4,4,4,5]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm0, %zmm5, %zmm3
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm3 = zmm3 ^ (zmm5 & (zmm3 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm28[0,0,0,1,4,4,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogd $184, %zmm2, %zmm5, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (zmm5 & (zmm0 ^ zmm2))
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 192(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 128(%rax)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 416fbe9aa340ca..324d1ceef10121 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -1059,7 +1059,7 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[4,12],zero,ymm1[u,u,u,u,5,13],zero,ymm1[u,u,u,u,6,14,22,u,u,u,u],zero,zero,ymm1[23,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
; AVX512-NEXT: vporq %zmm0, %zmm1, %zmm0
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm2))
; AVX512-NEXT: vextracti32x4 $2, %zmm0, 32(%rax)
; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX512-NEXT: vmovq %xmm1, 48(%rax)
@@ -1095,7 +1095,7 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,8],zero,zero,ymm2[u,u,u,1,9],zero,zero,ymm2[u,u,u,2,10,18,26,u,u,u],zero,zero,ymm2[19,27,u,u,u],zero,zero,ymm2[20,28]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8,u,u,u],zero,zero,ymm3[1,9,u,u,u],zero,zero,zero,zero,ymm3[u,u,u,19,27],zero,zero,ymm3[u,u,u,20,28],zero,zero
-; AVX512-FCP-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = mem & (ymm3 | ymm1)
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [1,3,5,7,1,3,5,7]
; AVX512-FCP-NEXT: # ymm1 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm1
@@ -1142,7 +1142,7 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[4,12],zero,ymm1[u,u,u,u,5,13],zero,ymm1[u,u,u,u,6,14,22,u,u,u,u],zero,zero,ymm1[23,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
; AVX512DQ-NEXT: vporq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm2))
; AVX512DQ-NEXT: vextracti32x4 $2, %zmm0, 32(%rax)
; AVX512DQ-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX512DQ-NEXT: vmovq %xmm1, 48(%rax)
@@ -1178,7 +1178,7 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[0,8],zero,zero,ymm2[u,u,u,1,9],zero,zero,ymm2[u,u,u,2,10,18,26,u,u,u],zero,zero,ymm2[19,27,u,u,u],zero,zero,ymm2[20,28]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8,u,u,u],zero,zero,ymm3[1,9,u,u,u],zero,zero,zero,zero,ymm3[u,u,u,19,27],zero,zero,ymm3[u,u,u,20,28],zero,zero
-; AVX512DQ-FCP-NEXT: vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = mem & (ymm3 | ymm1)
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [1,3,5,7,1,3,5,7]
; AVX512DQ-FCP-NEXT: # ymm1 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpermd %ymm2, %ymm1, %ymm1
@@ -2065,14 +2065,14 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm9[2,3,0,1]
; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u],zero,ymm11[5,u,u,u,u,u],zero,ymm11[6,u,u,u,u,u,23],zero,ymm11[u,u,u,u,u,24],zero,ymm11[u,u,u,u]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
-; AVX512-NEXT: vpternlogq $50, %ymm10, %ymm12, %ymm11
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm11 = ~ymm12 & (ymm11 | ymm10)
; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm9[0,2,0,2]
; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,ymm10[0,8,u,u,u],zero,zero,ymm10[1,9,u,u,u],zero,zero,ymm10[18,26,u,u,u],zero,zero,ymm10[19,27,u,u,u],zero,zero,ymm10[20,28]
; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm8[u,u,u,5],zero,ymm8[u,u,u,u,u,6],zero,ymm8[u,u,u,u,u],zero,ymm8[23,u,u,u,u,u],zero,ymm8[24,u,u,u,u,u],zero
; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm8[2,3,0,1]
; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u],zero,ymm13[5,u,u,u,u,u],zero,ymm13[6,u,u,u,u,u,23],zero,ymm13[u,u,u,u,u,24],zero,ymm13[u,u,u,u,u,25]
-; AVX512-NEXT: vpternlogq $200, %ymm11, %ymm12, %ymm13
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm13 = ymm12 & (ymm13 | ymm11)
; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm8[0,2,0,2]
; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,8],zero,zero,ymm11[u,u,u,1,9],zero,zero,ymm11[u,u,u,2,10],zero,zero,ymm11[u,u,u,19,27],zero,zero,ymm11[u,u,u,20,28],zero,zero
; AVX512-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm11
@@ -2081,7 +2081,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm7[2,3,0,1]
; AVX512-NEXT: vpshufb {{.*#+}} ymm12 = zero,ymm12[4,u,u,u,u,u],zero,ymm12[5,u,u,u,u,u],zero,ymm12[6,u,u,u,u,u,23],zero,ymm12[u,u,u,u,u,24],zero,ymm12[u,u]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm13 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
-; AVX512-NEXT: vpternlogq $200, %ymm11, %ymm13, %ymm12
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm12 = ymm13 & (ymm12 | ymm11)
; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm7[0,2,0,2]
; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,0,8],zero,ymm11[u,u,u,u,1,9],zero,ymm11[u,u,u,u,18,26],zero,ymm11[u,u,u,u,19,27],zero,ymm11[u,u,u,u]
; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
@@ -2094,7 +2094,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm13
; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
; AVX512-NEXT: vporq %zmm12, %zmm11, %zmm11
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm10))
; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[3,1,1,3]
; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,ymm8[u,u,u,10,2],zero,zero,ymm8[u,u,u,11,3],zero,zero,ymm8[u,u,u,20,28],zero,zero,ymm8[u,u,u,21,29],zero,zero,ymm8[u]
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[1,3,3,1]
@@ -2105,8 +2105,8 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[1,3,1,3]
; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9],zero,ymm7[u,u,u,u,2,10],zero,ymm7[u,u,u,u,19,27],zero,ymm7[u,u,u,u,20,28],zero,ymm7[u,u,u,u,21]
-; AVX512-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm7
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm7
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm9 & ~mem)
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (mem & (ymm7 ^ ymm8))
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[12,13,u,u,u],zero,zero,xmm5[14,15,u,u,u]
; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
@@ -2116,7 +2116,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10],zero,xmm1[u,u,u,u,13,12],zero,xmm1[u,u,u,u,15,14],zero
; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[13,u,u,u,u],zero,zero,xmm2[14,u,u,u,u],zero,zero,xmm2[15]
; AVX512-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-NEXT: vinserti32x4 $2, %xmm1, %zmm7, %zmm0
; AVX512-NEXT: vmovdqa %xmm1, 96(%rax)
; AVX512-NEXT: vmovdqa %ymm0, 64(%rax)
@@ -2162,8 +2162,8 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpermd %ymm9, %ymm11, %ymm11
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,4],zero,ymm11[u,u,u,u,1,5],zero,ymm11[u,u,u,u,2,6],zero,ymm11[u,u,u,u,19,23],zero,ymm11[u,u,u,u,24,28],zero,ymm11[u]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm13, %zmm11
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm11
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 | (zmm12 & mem)
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm10))
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[3,1,1,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[1],zero,zero,ymm7[u,u,u,10,2],zero,zero,ymm7[u,u,u,11,3],zero,zero,ymm7[u,u,u,20,28],zero,zero,ymm7[u,u,u,21,29],zero,zero,ymm7[u]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[1,3,3,1]
@@ -2175,8 +2175,8 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm8
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[1,3,1,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,1,9],zero,ymm9[u,u,u,u,2,10],zero,ymm9[u,u,u,u,19,27],zero,ymm9[u,u,u,u,20,28],zero,ymm9[u,u,u,u,21]
-; AVX512-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm9
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 | (ymm8 & ~mem)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 ^ (mem & (ymm9 ^ ymm7))
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[12,13,u,u,u],zero,zero,xmm5[14,15,u,u,u]
; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
@@ -2186,7 +2186,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10],zero,xmm1[u,u,u,u,13,12],zero,xmm1[u,u,u,u,15,14],zero
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[13,u,u,u,u],zero,zero,xmm2[14,u,u,u,u],zero,zero,xmm2[15]
; AVX512-FCP-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm9, %zmm0
; AVX512-FCP-NEXT: vmovdqa %xmm1, 96(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm11, (%rax)
@@ -2212,14 +2212,14 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm9[2,3,0,1]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u],zero,ymm11[5,u,u,u,u,u],zero,ymm11[6,u,u,u,u,u,23],zero,ymm11[u,u,u,u,u,24],zero,ymm11[u,u,u,u]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $50, %ymm10, %ymm12, %ymm11
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm11 = ~ymm12 & (ymm11 | ymm10)
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm9[0,2,0,2]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,ymm10[0,8,u,u,u],zero,zero,ymm10[1,9,u,u,u],zero,zero,ymm10[18,26,u,u,u],zero,zero,ymm10[19,27,u,u,u],zero,zero,ymm10[20,28]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm8[u,u,u,5],zero,ymm8[u,u,u,u,u,6],zero,ymm8[u,u,u,u,u],zero,ymm8[23,u,u,u,u,u],zero,ymm8[24,u,u,u,u,u],zero
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm8[2,3,0,1]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u],zero,ymm13[5,u,u,u,u,u],zero,ymm13[6,u,u,u,u,u,23],zero,ymm13[u,u,u,u,u,24],zero,ymm13[u,u,u,u,u,25]
-; AVX512DQ-NEXT: vpternlogq $200, %ymm11, %ymm12, %ymm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm13 = ymm12 & (ymm13 | ymm11)
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm8[0,2,0,2]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,8],zero,zero,ymm11[u,u,u,1,9],zero,zero,ymm11[u,u,u,2,10],zero,zero,ymm11[u,u,u,19,27],zero,zero,ymm11[u,u,u,20,28],zero,zero
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm11
@@ -2228,7 +2228,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm7[2,3,0,1]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm12 = zero,ymm12[4,u,u,u,u,u],zero,ymm12[5,u,u,u,u,u],zero,ymm12[6,u,u,u,u,u,23],zero,ymm12[u,u,u,u,u,24],zero,ymm12[u,u]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm13 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
-; AVX512DQ-NEXT: vpternlogq $200, %ymm11, %ymm13, %ymm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm12 = ymm13 & (ymm12 | ymm11)
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm7[0,2,0,2]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,0,8],zero,ymm11[u,u,u,u,1,9],zero,ymm11[u,u,u,u,18,26],zero,ymm11[u,u,u,u,19,27],zero,ymm11[u,u,u,u]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
@@ -2241,7 +2241,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm13
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
; AVX512DQ-NEXT: vporq %zmm12, %zmm11, %zmm11
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm10))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[3,1,1,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,ymm8[u,u,u,10,2],zero,zero,ymm8[u,u,u,11,3],zero,zero,ymm8[u,u,u,20,28],zero,zero,ymm8[u,u,u,21,29],zero,zero,ymm8[u]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[1,3,3,1]
@@ -2252,8 +2252,8 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[1,3,1,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9],zero,ymm7[u,u,u,u,2,10],zero,ymm7[u,u,u,u,19,27],zero,ymm7[u,u,u,u,20,28],zero,ymm7[u,u,u,u,21]
-; AVX512DQ-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm7
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 | (ymm9 & ~mem)
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm7 = ymm7 ^ (mem & (ymm7 ^ ymm8))
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[12,13,u,u,u],zero,zero,xmm5[14,15,u,u,u]
; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
@@ -2263,7 +2263,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10],zero,xmm1[u,u,u,u,13,12],zero,xmm1[u,u,u,u,15,14],zero
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[13,u,u,u,u],zero,zero,xmm2[14,u,u,u,u],zero,zero,xmm2[15]
; AVX512DQ-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm1, %zmm7, %zmm0
; AVX512DQ-NEXT: vmovdqa %xmm1, 96(%rax)
; AVX512DQ-NEXT: vmovdqa %ymm0, 64(%rax)
@@ -2309,8 +2309,8 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpermd %ymm9, %ymm11, %ymm11
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,4],zero,ymm11[u,u,u,u,1,5],zero,ymm11[u,u,u,u,2,6],zero,ymm11[u,u,u,u,19,23],zero,ymm11[u,u,u,u,24,28],zero,ymm11[u]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm13, %zmm11
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm11
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 | (zmm12 & mem)
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm10))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[3,1,1,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[1],zero,zero,ymm7[u,u,u,10,2],zero,zero,ymm7[u,u,u,11,3],zero,zero,ymm7[u,u,u,20,28],zero,zero,ymm7[u,u,u,21,29],zero,zero,ymm7[u]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[1,3,3,1]
@@ -2322,8 +2322,8 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm8
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[1,3,1,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,1,9],zero,ymm9[u,u,u,u,2,10],zero,ymm9[u,u,u,u,19,27],zero,ymm9[u,u,u,u,20,28],zero,ymm9[u,u,u,u,21]
-; AVX512DQ-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 | (ymm8 & ~mem)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm9 = ymm9 ^ (mem & (ymm9 ^ ymm7))
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[12,13,u,u,u],zero,zero,xmm5[14,15,u,u,u]
; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
@@ -2333,7 +2333,7 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10],zero,xmm1[u,u,u,u,13,12],zero,xmm1[u,u,u,u,15,14],zero
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[13,u,u,u,u],zero,zero,xmm2[14,u,u,u,u],zero,zero,xmm2[15]
; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm9, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, 96(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, (%rax)
@@ -4184,7 +4184,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
; AVX512-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512-NEXT: vporq %zmm8, %zmm9, %zmm8
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm7))
; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
; AVX512-NEXT: vpshuflw {{.*#+}} ymm9 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX512-NEXT: vmovdqa64 %ymm0, %ymm18
@@ -4200,8 +4200,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm9, %zmm9
; AVX512-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512-NEXT: vporq %zmm7, %zmm9, %zmm7
-; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm7
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm7 = zmm10 ^ (mem & (zmm7 ^ zmm10))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm8))
; AVX512-NEXT: vmovdqa (%rsi), %xmm9
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm9[u],zero,xmm9[7],zero,xmm9[5,u,u,u],zero,xmm9[8],zero,xmm9[6,u,u,u],zero
; AVX512-NEXT: vmovdqa (%rdi), %xmm10
@@ -4220,7 +4220,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
; AVX512-NEXT: vinserti32x4 $2, %xmm11, %zmm12, %zmm11
; AVX512-NEXT: vpermq {{.*#+}} zmm16 = zmm11[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm16
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm8))
; AVX512-NEXT: vmovdqa (%r9), %xmm11
; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm11[4,u,u,u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6]
; AVX512-NEXT: vmovdqa (%r8), %xmm12
@@ -4236,8 +4236,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,0,1,0,4,4,5,4]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm8
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm8
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm17))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm16))
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
@@ -4252,7 +4252,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm1
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm0))
; AVX512-NEXT: vmovdqa64 %ymm19, %ymm14
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm14[u],zero,zero,zero,zero,ymm14[14],zero,ymm14[u],zero,zero,zero,zero,ymm14[15],zero,ymm14[u],zero,zero,zero,zero,ymm14[16],zero,ymm14[u],zero,zero,zero,zero,ymm14[17],zero,ymm14[u],zero,zero
; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
@@ -4268,8 +4268,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vmovdqa64 %ymm18, %ymm11
; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm11[13,u,u,u,u],zero,zero,ymm11[14,u,u,u,u],zero,zero,ymm11[15,u,u,u,u],zero,zero,ymm11[16,u,u,u,u],zero,zero,ymm11[17,u,u]
; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm9
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm0 & mem)
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm1))
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
@@ -4278,15 +4278,15 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
; AVX512-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (mem & (ymm1 ^ ymm0))
; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
; AVX512-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
+; AVX512-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm1))
; AVX512-NEXT: vmovdqa %ymm2, 192(%rax)
; AVX512-NEXT: vmovdqa64 %zmm8, (%rax)
; AVX512-NEXT: vmovdqa64 %zmm7, 128(%rax)
@@ -4323,7 +4323,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm10, %zmm13, %zmm10
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm15
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm15 = zmm15 ^ (mem & (zmm15 ^ zmm7))
; AVX512-FCP-NEXT: vmovdqa (%r10), %xmm10
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm10[1,1,0,0,4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [0,1,0,1,0,0,0,0]
@@ -4340,8 +4340,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm7, %zmm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm0[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm7
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm7
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm7 = zmm16 ^ (mem & (zmm7 ^ zmm16))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm15))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512-FCP-NEXT: vpor %ymm0, %ymm15, %ymm0
@@ -4356,7 +4356,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm8, %zmm9
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm0))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm1[u],zero,zero,zero,zero,ymm1[14],zero,ymm1[u],zero,zero,zero,zero,ymm1[15],zero,ymm1[u],zero,zero,zero,zero,ymm1[16],zero,ymm1[u],zero,zero,zero,zero,ymm1[17],zero,ymm1[u],zero,zero
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
; AVX512-FCP-NEXT: vpor %ymm0, %ymm8, %ymm0
@@ -4372,8 +4372,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vmovdqa64 %ymm17, %ymm12
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm12[13,u,u,u,u],zero,zero,ymm12[14,u,u,u,u],zero,zero,ymm12[15,u,u,u,u],zero,zero,ymm12[16,u,u,u,u],zero,zero,ymm12[17,u,u]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm0 & mem)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm9))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
@@ -4392,7 +4392,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
; AVX512-FCP-NEXT: vporq %zmm9, %zmm10, %zmm9
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm0))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
@@ -4408,8 +4408,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm10
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm10 = zmm10 ^ (mem & (zmm10 ^ zmm0))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (mem & (zmm10 ^ zmm9))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
; AVX512-FCP-NEXT: vpor %ymm0, %ymm5, %ymm0
@@ -4418,15 +4418,15 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
; AVX512-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm0))
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
; AVX512-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (mem & (ymm1 ^ ymm0))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (mem & (ymm1 ^ ymm3))
; AVX512-FCP-NEXT: vmovdqa %ymm1, 192(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 128(%rax)
@@ -4463,7 +4463,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512DQ-NEXT: vporq %zmm8, %zmm9, %zmm8
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm7))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,u,u,u,u,26,27,24,25]
; AVX512DQ-NEXT: vpshuflw {{.*#+}} ymm9 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
; AVX512DQ-NEXT: vmovdqa64 %ymm0, %ymm18
@@ -4479,8 +4479,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm9, %zmm9
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm9 = zmm9[2,3,2,3,6,7,6,7]
; AVX512DQ-NEXT: vporq %zmm7, %zmm9, %zmm7
-; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm7
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm7 = zmm10 ^ (mem & (zmm7 ^ zmm10))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm8))
; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm9
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = xmm9[u],zero,xmm9[7],zero,xmm9[5,u,u,u],zero,xmm9[8],zero,xmm9[6,u,u,u],zero
; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm10
@@ -4499,7 +4499,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
; AVX512DQ-NEXT: vinserti32x4 $2, %xmm11, %zmm12, %zmm11
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm16 = zmm11[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm16
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm8))
; AVX512DQ-NEXT: vmovdqa (%r9), %xmm11
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm11[4,u,u,u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6]
; AVX512DQ-NEXT: vmovdqa (%r8), %xmm12
@@ -4515,8 +4515,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm0[0,0,1,0,4,4,5,4]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm8
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm8
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm17))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm16))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
@@ -4531,7 +4531,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm9, %zmm1
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm1 = zmm1 ^ (mem & (zmm1 ^ zmm0))
; AVX512DQ-NEXT: vmovdqa64 %ymm19, %ymm14
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm14[u],zero,zero,zero,zero,ymm14[14],zero,ymm14[u],zero,zero,zero,zero,ymm14[15],zero,ymm14[u],zero,zero,zero,zero,ymm14[16],zero,ymm14[u],zero,zero,zero,zero,ymm14[17],zero,ymm14[u],zero,zero
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
@@ -4547,8 +4547,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vmovdqa64 %ymm18, %ymm11
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm11[13,u,u,u,u],zero,zero,ymm11[14,u,u,u,u],zero,zero,ymm11[15,u,u,u,u],zero,zero,ymm11[16,u,u,u,u],zero,zero,ymm11[17,u,u]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm10, %zmm9, %zmm9
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm0 & mem)
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm1))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
; AVX512DQ-NEXT: vpor %ymm0, %ymm1, %ymm0
@@ -4557,15 +4557,15 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
; AVX512DQ-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (mem & (ymm1 ^ ymm0))
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
; AVX512DQ-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm0))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm2 = ymm2 ^ (mem & (ymm2 ^ ymm1))
; AVX512DQ-NEXT: vmovdqa %ymm2, 192(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm8, (%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm7, 128(%rax)
@@ -4602,7 +4602,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm13[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm10, %zmm13, %zmm10
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm15 = zmm10[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm15
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm15 = zmm15 ^ (mem & (zmm15 ^ zmm7))
; AVX512DQ-FCP-NEXT: vmovdqa (%r10), %xmm10
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm7 = xmm10[1,1,0,0,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [0,1,0,1,0,0,0,0]
@@ -4619,8 +4619,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm7, %zmm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm7 = zmm0[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm7
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm7
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm7 = zmm16 ^ (mem & (zmm7 ^ zmm16))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm15))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm15 = ymm3[0,1,14],zero,ymm3[u,u,0,1,14,15],zero,ymm3[u,u,13,2,3,16],zero,ymm3[u,u,28,29,16,17],zero,ymm3[u,u,19,28,29,18],zero
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm15, %ymm0
@@ -4635,7 +4635,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm8, %zmm9
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm1[u],zero,zero,zero,zero,ymm1[14],zero,ymm1[u],zero,zero,zero,zero,ymm1[15],zero,ymm1[u],zero,zero,zero,zero,ymm1[16],zero,ymm1[u],zero,zero,zero,zero,ymm1[17],zero,ymm1[u],zero,zero
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[13,u,u,u,u,u],zero,ymm2[14,u,u,u,u,u],zero,ymm2[15,u,u,u,u,u],zero,ymm2[16,u,u,u,u,u],zero,ymm2[17,u,u,u]
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm8, %ymm0
@@ -4651,8 +4651,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm17, %ymm12
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,ymm12[13,u,u,u,u],zero,zero,ymm12[14,u,u,u,u],zero,zero,ymm12[15,u,u,u,u],zero,zero,ymm12[16,u,u,u,u],zero,zero,ymm12[17,u,u]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm0 & mem)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm9))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
@@ -4671,7 +4671,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
; AVX512DQ-FCP-NEXT: vporq %zmm9, %zmm10, %zmm9
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[20],zero,ymm2[18],zero,zero,zero,zero,ymm2[21],zero,ymm2[19],zero,zero,zero,zero,ymm2[22]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm10, %zmm0, %zmm0
@@ -4687,8 +4687,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm10
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm10 = zmm10 ^ (mem & (zmm10 ^ zmm0))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm10 ^ (mem & (zmm10 ^ zmm9))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u],zero,ymm6[30],zero,ymm6[28,u,u,u],zero,ymm6[31],zero,ymm6[29,u]
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm5, %ymm0
@@ -4697,15 +4697,15 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm3[28],zero,ymm3[30,31,30,31],zero,ymm3[29],zero,ymm3[31,28,29]
; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm3 = ymm3 ^ (mem & (ymm3 ^ ymm0))
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm2[27,u,u,u],zero,ymm2[30],zero,ymm2[28,u,u,u],zero,ymm2[31],zero
; AVX512DQ-FCP-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (mem & (ymm1 ^ ymm0))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm1 = ymm1 ^ (mem & (ymm1 ^ ymm3))
; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, 192(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, 128(%rax)
@@ -8841,7 +8841,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm9
; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm1, %zmm9
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512-NEXT: vpternlogq $226, %zmm6, %zmm1, %zmm22
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm22 = zmm6 ^ (zmm1 & (zmm22 ^ zmm6))
; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
; AVX512-NEXT: # ymm6 = mem[2,3,2,3]
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -8850,7 +8850,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: # ymm27 = mem[2,3,2,3]
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm27
-; AVX512-NEXT: vpternlogq $226, %zmm6, %zmm1, %zmm27
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm27 = zmm6 ^ (zmm1 & (zmm27 ^ zmm6))
; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
; AVX512-NEXT: vmovdqa64 %xmm24, %xmm0
; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm1
@@ -8873,15 +8873,15 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm13 # 64-byte Folded Reload
; AVX512-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
; AVX512-NEXT: vpermq {{.*#+}} zmm13 = zmm13[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm13
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm11))
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
; AVX512-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 64-byte Folded Reload
; AVX512-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm21 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512-NEXT: vpternlogq $184, %zmm13, %zmm21, %zmm11
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (zmm21 & (zmm11 ^ zmm13))
; AVX512-NEXT: vpermq {{.*#+}} zmm8 = zmm8[0,1,0,1,4,5,4,5]
; AVX512-NEXT: vpermq {{.*#+}} zmm7 = zmm17[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogq $226, %zmm8, %zmm21, %zmm7
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm21 & (zmm7 ^ zmm8))
; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm15[0,1,0,1]
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm14[0,1,0,1]
@@ -8893,40 +8893,40 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-NEXT: vporq %zmm26, %zmm23, %zmm17
; AVX512-NEXT: vpermq {{.*#+}} zmm18 = zmm20[2,3,2,3,6,7,6,7]
; AVX512-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogq $226, %zmm18, %zmm21, %zmm17
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm17 = zmm18 ^ (zmm21 & (zmm17 ^ zmm18))
; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm12 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm12
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm12 = zmm12 ^ (mem & (zmm12 ^ zmm8))
; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm8 # 32-byte Folded Reload
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm19
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm19
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 | (zmm8 & mem)
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (mem & (zmm19 ^ zmm12))
; AVX512-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
; AVX512-NEXT: # zmm8 = mem[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm8
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm11))
; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm15[0,0,1,0]
; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm14, %zmm11
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm30, %zmm11
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm11
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm30))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm22))
; AVX512-NEXT: vpermq {{.*#+}} zmm0 = zmm31[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm0
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm0
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm0 = zmm28 ^ (mem & (zmm0 ^ zmm28))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm7))
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
; AVX512-NEXT: # zmm3 = zmm3[0,1,2,3],mem[2,3,2,3]
-; AVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm9
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm9
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm3 & mem)
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm27))
; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
; AVX512-NEXT: vpermq {{.*#+}} zmm3 = zmm6[0,1,0,1,4,5,4,5]
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm3
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm1))
; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm10[0,1,0,1,4,5,4,5]
; AVX512-NEXT: vpermq {{.*#+}} zmm5 = zmm5[0,0,1,0,4,4,5,4]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm5
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm1))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm3))
; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
; AVX512-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm16
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm16
+; AVX512-NEXT: vpternlogd {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm1))
+; AVX512-NEXT: vpternlogq {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm17))
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: vmovdqa64 %zmm16, 128(%rax)
; AVX512-NEXT: vmovdqa64 %zmm5, (%rax)
@@ -9224,21 +9224,21 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm20 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm20
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (mem & (zmm20 ^ zmm8))
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm30 = zmm8[2,3,2,3,6,7,6,7]
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512-FCP-NEXT: vpternlogq $184, %zmm20, %zmm8, %zmm30
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (zmm8 & (zmm30 ^ zmm20))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm19[0,1,0,1,4,5,4,5]
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm20 = zmm17[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm20
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm5 ^ (zmm8 & (zmm20 ^ zmm5))
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm25 = zmm1[2,3,2,3,6,7,6,7]
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm5[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm25, %zmm8, %zmm1
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm25 ^ (zmm8 & (zmm1 ^ zmm25))
; AVX512-FCP-NEXT: vmovdqa64 %xmm16, %xmm4
; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm5
@@ -9247,7 +9247,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm5, %xmm5
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm14, %zmm8, %zmm10
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm14 ^ (zmm8 & (zmm10 ^ zmm14))
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm21[2,3,2,3],zmm5[0,1,0,1]
; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
; AVX512-FCP-NEXT: # ymm11 = mem[2,3,2,3]
@@ -9256,7 +9256,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm29[2,3,2,3]
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512-FCP-NEXT: vpternlogq $226, %zmm11, %zmm8, %zmm14
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm11 ^ (zmm8 & (zmm14 ^ zmm11))
; AVX512-FCP-NEXT: vmovdqa64 %xmm23, %xmm0
; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm0[1,1,0,0,4,5,6,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,1,0,1,2,0,0,1]
@@ -9286,38 +9286,38 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm8, %zmm8
; AVX512-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Folded Reload
; AVX512-FCP-NEXT: # zmm13 = mem[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm30, %zmm13
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm30))
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm17
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm17 = zmm17 ^ (mem & (zmm17 ^ zmm6))
; AVX512-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm18
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm18
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm18 = zmm18 | (zmm3 & mem)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm18 = zmm18 ^ (mem & (zmm18 ^ zmm17))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm31, %zmm5, %zmm3
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm28[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm20, %zmm5
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm5 = zmm3 ^ (mem & (zmm5 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm20))
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm26[0,1,0,1,4,5,4,5]
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm9[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm3))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm16, %zmm3
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm7[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm4
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm4 = zmm3 ^ (mem & (zmm4 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm6))
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm15, %zmm6
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm6
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm3))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm1))
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm21, %zmm0
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm0
+; AVX512-FCP-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm21))
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm10))
; AVX512-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
; AVX512-FCP-NEXT: # zmm1 = zmm1[0,1,2,3],mem[2,3,2,3]
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm8
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm8
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm1 & mem)
+; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm14))
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, 320(%rax)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 192(%rax)
@@ -9629,7 +9629,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm9
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm1, %zmm9
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm6, %zmm1, %zmm22
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm22 = zmm6 ^ (zmm1 & (zmm22 ^ zmm6))
; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
; AVX512DQ-NEXT: # ymm6 = mem[2,3,2,3]
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
@@ -9638,7 +9638,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: # ymm27 = mem[2,3,2,3]
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm27
-; AVX512DQ-NEXT: vpternlogq $226, %zmm6, %zmm1, %zmm27
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm27 = zmm6 ^ (zmm1 & (zmm27 ^ zmm6))
; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
; AVX512DQ-NEXT: vmovdqa64 %xmm24, %xmm0
; AVX512DQ-NEXT: vpshufb %xmm0, %xmm1, %xmm1
@@ -9661,15 +9661,15 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm13 # 64-byte Folded Reload
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm13 = zmm13[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm13
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm11))
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
; AVX512DQ-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 64-byte Folded Reload
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm21 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512DQ-NEXT: vpternlogq $184, %zmm13, %zmm21, %zmm11
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (zmm21 & (zmm11 ^ zmm13))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm8 = zmm8[0,1,0,1,4,5,4,5]
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm7 = zmm17[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm8, %zmm21, %zmm7
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm21 & (zmm7 ^ zmm8))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm15[0,1,0,1]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm14[0,1,0,1]
@@ -9681,40 +9681,40 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-NEXT: vporq %zmm26, %zmm23, %zmm17
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm18 = zmm20[2,3,2,3,6,7,6,7]
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogq $226, %zmm18, %zmm21, %zmm17
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm17 = zmm18 ^ (zmm21 & (zmm17 ^ zmm18))
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm12 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm12
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm12 = zmm12 ^ (mem & (zmm12 ^ zmm8))
; AVX512DQ-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm8 # 32-byte Folded Reload
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm19
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm19
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 | (zmm8 & mem)
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm19 = zmm19 ^ (mem & (zmm19 ^ zmm12))
; AVX512DQ-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm8 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm8
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm11))
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm15[0,0,1,0]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm14, %zmm11
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm30, %zmm11
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm11
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm30))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm22))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm0 = zmm31[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm0
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm0
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm0 = zmm28 ^ (mem & (zmm0 ^ zmm28))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm7))
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
; AVX512DQ-NEXT: # zmm3 = zmm3[0,1,2,3],mem[2,3,2,3]
-; AVX512DQ-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm9
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm9
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm3 & mem)
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm9 = zmm9 ^ (mem & (zmm9 ^ zmm27))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm3 = zmm6[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm3
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm3 = zmm3 ^ (mem & (zmm3 ^ zmm1))
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm10[0,1,0,1,4,5,4,5]
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm5 = zmm5[0,0,1,0,4,4,5,4]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm5
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm1))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm3))
; AVX512DQ-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
; AVX512DQ-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm16
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm16
+; AVX512DQ-NEXT: vpternlogd {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm1))
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm16 = zmm16 ^ (mem & (zmm16 ^ zmm17))
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-NEXT: vmovdqa64 %zmm16, 128(%rax)
; AVX512DQ-NEXT: vmovdqa64 %zmm5, (%rax)
@@ -10012,21 +10012,21 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm20 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm20
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm20 ^ (mem & (zmm20 ^ zmm8))
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm30 = zmm8[2,3,2,3,6,7,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $184, %zmm20, %zmm8, %zmm30
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm30 = zmm30 ^ (zmm8 & (zmm30 ^ zmm20))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm19[0,1,0,1,4,5,4,5]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm20 = zmm17[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm5, %zmm8, %zmm20
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm20 = zmm5 ^ (zmm8 & (zmm20 ^ zmm5))
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm25 = zmm1[2,3,2,3,6,7,6,7]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm1 = zmm5[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm25, %zmm8, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm25 ^ (zmm8 & (zmm1 ^ zmm25))
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm16, %xmm4
; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm5, %xmm5
@@ -10035,7 +10035,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm5, %xmm5
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm14, %zmm8, %zmm10
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm10 = zmm14 ^ (zmm8 & (zmm10 ^ zmm14))
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm21 = zmm21[2,3,2,3],zmm5[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: # ymm11 = mem[2,3,2,3]
@@ -10044,7 +10044,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm14 = ymm29[2,3,2,3]
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %zmm11, %zmm8, %zmm14
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm14 = zmm11 ^ (zmm8 & (zmm14 ^ zmm11))
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm23, %xmm0
; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm8 = xmm0[1,1,0,0,4,5,6,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [0,1,0,1,2,0,0,1]
@@ -10074,38 +10074,38 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm8, %zmm8
; AVX512DQ-FCP-NEXT: vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: # zmm13 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm30, %zmm13
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm13 = zmm13 ^ (mem & (zmm13 ^ zmm30))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm17
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm17 = zmm17 ^ (mem & (zmm17 ^ zmm6))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm18
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm18
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm18 = zmm18 | (zmm3 & mem)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm18 = zmm18 ^ (mem & (zmm18 ^ zmm17))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm31, %zmm5, %zmm3
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm5 = zmm28[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm20, %zmm5
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm5 = zmm3 ^ (mem & (zmm5 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 ^ (mem & (zmm5 ^ zmm20))
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm26[0,1,0,1,4,5,4,5]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm6 = zmm9[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm3))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm16, %zmm3
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm4 = zmm7[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT: vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm4
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm4 = zmm3 ^ (mem & (zmm4 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm4 = zmm4 ^ (mem & (zmm4 ^ zmm6))
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm15, %zmm6
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm6
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm3))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm1))
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm21, %zmm0
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm0
+; AVX512DQ-FCP-NEXT: vpternlogd {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm21))
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm10))
; AVX512DQ-FCP-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT: vshufi64x2 $84, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
; AVX512DQ-FCP-NEXT: # zmm1 = zmm1[0,1,2,3],mem[2,3,2,3]
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm8
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm8
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 | (zmm1 & mem)
+; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm8 = zmm8 ^ (mem & (zmm8 ^ zmm14))
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, 320(%rax)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 192(%rax)
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-512.ll b/llvm/test/CodeGen/X86/vector-lzcnt-512.ll
index efecfa47eb4abb..a722a5aee873b5 100644
--- a/llvm/test/CodeGen/X86/vector-lzcnt-512.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-512.ll
@@ -34,7 +34,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
@@ -59,7 +59,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpandn %ymm1, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512DQ-NEXT: vpand %ymm1, %ymm3, %ymm4
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -113,7 +113,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
@@ -138,7 +138,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpandn %ymm1, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512DQ-NEXT: vpand %ymm1, %ymm3, %ymm4
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -190,7 +190,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
@@ -217,7 +217,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm1
; AVX512DQ-NEXT: vpbroadcastb {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpandn %ymm0, %ymm1, %ymm2
-; AVX512DQ-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = ~zmm1
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm3
; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm4
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -277,7 +277,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
-; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
@@ -304,7 +304,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm1
; AVX512DQ-NEXT: vpbroadcastb {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpandn %ymm0, %ymm1, %ymm2
-; AVX512DQ-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = ~zmm1
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm3
; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm4
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
diff --git a/llvm/test/CodeGen/X86/vector-tzcnt-512.ll b/llvm/test/CodeGen/X86/vector-tzcnt-512.ll
index 368fcd3e0e9a10..97b988880fac4c 100644
--- a/llvm/test/CodeGen/X86/vector-tzcnt-512.ll
+++ b/llvm/test/CodeGen/X86/vector-tzcnt-512.ll
@@ -8,7 +8,7 @@
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CD-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512CD-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
@@ -18,7 +18,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
;
; AVX512CDBW-LABEL: testv8i64:
; AVX512CDBW: # %bb.0:
-; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
@@ -28,7 +28,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
;
; AVX512BW-LABEL: testv8i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -46,7 +46,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv8i64:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512VPOPCNTDQ-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
@@ -54,7 +54,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
;
; BITALG-LABEL: testv8i64:
; BITALG: # %bb.0:
-; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -68,7 +68,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CD-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512CD-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
@@ -78,7 +78,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
;
; AVX512CDBW-LABEL: testv8i64u:
; AVX512CDBW: # %bb.0:
-; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
@@ -88,7 +88,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
;
; AVX512BW-LABEL: testv8i64u:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -106,7 +106,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv8i64u:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512VPOPCNTDQ-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
@@ -114,7 +114,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
;
; BITALG-LABEL: testv8i64u:
; BITALG: # %bb.0:
-; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -128,7 +128,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CD-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512CD-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
@@ -138,7 +138,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
;
; AVX512CDBW-LABEL: testv16i32:
; AVX512CDBW: # %bb.0:
-; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
@@ -148,7 +148,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
;
; AVX512BW-LABEL: testv16i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -170,7 +170,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv16i32:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512VPOPCNTDQ-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
@@ -178,7 +178,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
;
; BITALG-LABEL: testv16i32:
; BITALG: # %bb.0:
-; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -196,7 +196,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CD-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512CD-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
@@ -206,7 +206,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
;
; AVX512CDBW-LABEL: testv16i32u:
; AVX512CDBW: # %bb.0:
-; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
@@ -216,7 +216,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
;
; AVX512BW-LABEL: testv16i32u:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -238,7 +238,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv16i32u:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512VPOPCNTDQ-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
@@ -246,7 +246,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
;
; BITALG-LABEL: testv16i32u:
; BITALG: # %bb.0:
-; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -296,7 +296,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
;
; AVX512CDBW-LABEL: testv32i16:
; AVX512CDBW: # %bb.0:
-; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -315,7 +315,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
;
; AVX512BW-LABEL: testv32i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -351,7 +351,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
;
; BITALG-LABEL: testv32i16:
; BITALG: # %bb.0:
-; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntw %zmm0, %zmm0
@@ -395,7 +395,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
;
; AVX512CDBW-LABEL: testv32i16u:
; AVX512CDBW: # %bb.0:
-; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -414,7 +414,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
;
; AVX512BW-LABEL: testv32i16u:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -450,7 +450,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
;
; BITALG-LABEL: testv32i16u:
; BITALG: # %bb.0:
-; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntw %zmm0, %zmm0
@@ -488,7 +488,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
;
; AVX512CDBW-LABEL: testv64i8:
; AVX512CDBW: # %bb.0:
-; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -504,7 +504,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
;
; AVX512BW-LABEL: testv64i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -546,7 +546,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
;
; BITALG-LABEL: testv64i8:
; BITALG: # %bb.0:
-; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -584,7 +584,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
;
; AVX512CDBW-LABEL: testv64i8u:
; AVX512CDBW: # %bb.0:
-; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -600,7 +600,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
;
; AVX512BW-LABEL: testv64i8u:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -642,7 +642,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
;
; BITALG-LABEL: testv64i8u:
; BITALG: # %bb.0:
-; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
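
A quick reader's sketch (not part of the commit) of how the regenerated comments above are derived: a vpternlog immediate is an 8-bit truth table, and bit (dst*4 + src1*2 + src2) of the immediate is the result bit for that combination of input bits, where dst is the destination register (the last AT&T operand) and src1/src2 are the remaining two sources in order. The Python below, with illustrative names of my own choosing, reproduces three immediates that appear in this diff; it is a sanity check, not LLVM's implementation.

def ternlog_bit(imm: int, dst: int, src1: int, src2: int) -> int:
    # Bit (dst*4 + src1*2 + src2) of the 8-bit immediate is the result bit
    # for one bit position of the three inputs.
    return (imm >> ((dst << 2) | (src1 << 1) | src2)) & 1

inputs = [(a, b, c) for a in (0, 1) for b in (0, 1) for c in (0, 1)]

# $15: NOT of the destination, so with all three operands equal, "zmm0 = ~zmm0".
assert all(ternlog_bit(0x0F, a, b, c) == 1 - a for a, b, c in inputs)

# $255: always one, i.e. "zmm1 = -1".
assert all(ternlog_bit(0xFF, a, b, c) == 1 for a, b, c in inputs)

# $216: dst ^ (src2 & (dst ^ src1)), the bitwise-select pattern printed as
# e.g. "zmm11 = zmm11 ^ (mem & (zmm11 ^ zmm30))".
assert all(ternlog_bit(0xD8, a, b, c) == (a ^ (c & (a ^ b))) for a, b, c in inputs)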