[llvm] c5b2371 - [X86] Add masked versions of the VPTERNLOG test cases added for D83630. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Jul 25 16:38:34 PDT 2020


Author: Craig Topper
Date: 2020-07-25T16:37:17-07:00
New Revision: c5b23714368eabfb22fcd7f7567cf2a9830c8d8b

URL: https://github.com/llvm/llvm-project/commit/c5b23714368eabfb22fcd7f7567cf2a9830c8d8b
DIFF: https://github.com/llvm/llvm-project/commit/c5b23714368eabfb22fcd7f7567cf2a9830c8d8b.diff

LOG: [X86] Add masked versions of the VPTERNLOG test cases added for D83630. NFC

We don't fold these masked patterns yet, and D83630 won't improve that,
but at least we'll have the tests.
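
Concretely, each new test takes the existing and-with-constant feeding an
or/xor and wraps it in a select on a vector compare, giving zeroing (maskz)
and merging (maskx/masky) variants. Below is a minimal sketch of the zeroing
"or" flavor (an illustrative function, not one of the committed tests), with
the single masked VPTERNLOG a later fold could ideally produce shown as a
comment; the $236 immediate (which encodes (A & C) | B for A = dst,
B = src2, C = the memory operand) and the operand placement are assumptions,
not output from this commit:

  define <4 x i32> @maskz_or_and_sketch(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
    %m = icmp slt <4 x i32> %mask, zeroinitializer
    %a = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
    %b = or <4 x i32> %a, %y
    %c = select <4 x i1> %m, <4 x i32> %b, <4 x i32> zeroinitializer
    ; hoped-for codegen (hypothetical):
    ;   vpternlogd $236, {{.*}}(%rip), %xmm1, %xmm0 {%k1} {z}
    ret <4 x i32> %c
  }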

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx512-logic.ll
    llvm/test/CodeGen/X86/avx512vl-logic.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/avx512-logic.ll b/llvm/test/CodeGen/X86/avx512-logic.ll
index 88a3b5aea9bd..30607214f56d 100644
--- a/llvm/test/CodeGen/X86/avx512-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512-logic.ll
@@ -919,3 +919,139 @@ define <8 x i64> @ternlog_xor_and_mask(<8 x i64> %x, <8 x i64> %y) {
   %b = xor <8 x i64> %a, %y
   ret <8 x i64> %b
 }
+
+define <16 x i32> @ternlog_maskz_or_and_mask(<16 x i32> %x, <16 x i32> %y, <16 x i32> %mask) {
+; KNL-LABEL: ternlog_maskz_or_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtd %zmm2, %zmm3, %k1
+; KNL-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; KNL-NEXT:    vpord %zmm1, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskz_or_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovd2m %zmm2, %k1
+; SKX-NEXT:    vandps {{.*}}(%rip), %zmm0, %zmm0
+; SKX-NEXT:    vorps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %m = icmp slt <16 x i32> %mask, zeroinitializer
+  %a = and <16 x i32> %x, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+  %b = or <16 x i32> %a, %y
+  %c = select <16 x i1> %m, <16 x i32> %b, <16 x i32> zeroinitializer
+  ret <16 x i32> %c
+}
+
+define <8 x i64> @ternlog_maskz_xor_and_mask(<8 x i64> %x, <8 x i64> %y, <8 x i64> %mask) {
+; KNL-LABEL: ternlog_maskz_xor_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtq %zmm2, %zmm3, %k1
+; KNL-NEXT:    vpandd {{.*}}(%rip), %zmm0, %zmm0
+; KNL-NEXT:    vpxorq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskz_xor_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovq2m %zmm2, %k1
+; SKX-NEXT:    vandpd {{.*}}(%rip), %zmm0, %zmm0
+; SKX-NEXT:    vxorpd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %m = icmp slt <8 x i64> %mask, zeroinitializer
+  %a = and <8 x i64> %x, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  %b = xor <8 x i64> %a, %y
+  %c = select <8 x i1> %m, <8 x i64> %b, <8 x i64> zeroinitializer
+  ret <8 x i64> %c
+}
+
+define <16 x i32> @ternlog_maskx_or_and_mask(<16 x i32> %x, <16 x i32> %y, <16 x i32> %mask) {
+; KNL-LABEL: ternlog_maskx_or_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtd %zmm2, %zmm3, %k1
+; KNL-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm2
+; KNL-NEXT:    vpord %zmm1, %zmm2, %zmm0 {%k1}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskx_or_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovd2m %zmm2, %k1
+; SKX-NEXT:    vandps {{.*}}(%rip), %zmm0, %zmm2
+; SKX-NEXT:    vorps %zmm1, %zmm2, %zmm0 {%k1}
+; SKX-NEXT:    retq
+  %m = icmp slt <16 x i32> %mask, zeroinitializer
+  %a = and <16 x i32> %x, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+  %b = or <16 x i32> %a, %y
+  %c = select <16 x i1> %m, <16 x i32> %b, <16 x i32> %x
+  ret <16 x i32> %c
+}
+
+define <16 x i32> @ternlog_masky_or_and_mask(<16 x i32> %x, <16 x i32> %y, <16 x i32> %mask) {
+; KNL-LABEL: ternlog_masky_or_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtd %zmm2, %zmm3, %k1
+; KNL-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; KNL-NEXT:    vpord %zmm1, %zmm0, %zmm1 {%k1}
+; KNL-NEXT:    vmovdqa64 %zmm1, %zmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_masky_or_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovd2m %zmm2, %k1
+; SKX-NEXT:    vandps {{.*}}(%rip), %zmm0, %zmm0
+; SKX-NEXT:    vorps %zmm1, %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovaps %zmm1, %zmm0
+; SKX-NEXT:    retq
+  %m = icmp slt <16 x i32> %mask, zeroinitializer
+  %a = and <16 x i32> %x, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+  %b = or <16 x i32> %a, %y
+  %c = select <16 x i1> %m, <16 x i32> %b, <16 x i32> %y
+  ret <16 x i32> %c
+}
+
+define <8 x i64> @ternlog_maskx_xor_and_mask(<8 x i64> %x, <8 x i64> %y, <8 x i64> %mask) {
+; KNL-LABEL: ternlog_maskx_xor_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtq %zmm2, %zmm3, %k1
+; KNL-NEXT:    vpandd {{.*}}(%rip), %zmm0, %zmm2
+; KNL-NEXT:    vpxorq %zmm1, %zmm2, %zmm0 {%k1}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskx_xor_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovq2m %zmm2, %k1
+; SKX-NEXT:    vandpd {{.*}}(%rip), %zmm0, %zmm2
+; SKX-NEXT:    vxorpd %zmm1, %zmm2, %zmm0 {%k1}
+; SKX-NEXT:    retq
+  %m = icmp slt <8 x i64> %mask, zeroinitializer
+  %a = and <8 x i64> %x, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  %b = xor <8 x i64> %a, %y
+  %c = select <8 x i1> %m, <8 x i64> %b, <8 x i64> %x
+  ret <8 x i64> %c
+}
+
+define <8 x i64> @ternlog_masky_xor_and_mask(<8 x i64> %x, <8 x i64> %y, <8 x i64> %mask) {
+; KNL-LABEL: ternlog_masky_xor_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtq %zmm2, %zmm3, %k1
+; KNL-NEXT:    vpandd {{.*}}(%rip), %zmm0, %zmm0
+; KNL-NEXT:    vpxorq %zmm1, %zmm0, %zmm1 {%k1}
+; KNL-NEXT:    vmovdqa64 %zmm1, %zmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_masky_xor_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovq2m %zmm2, %k1
+; SKX-NEXT:    vandpd {{.*}}(%rip), %zmm0, %zmm0
+; SKX-NEXT:    vxorpd %zmm1, %zmm0, %zmm1 {%k1}
+; SKX-NEXT:    vmovapd %zmm1, %zmm0
+; SKX-NEXT:    retq
+  %m = icmp slt <8 x i64> %mask, zeroinitializer
+  %a = and <8 x i64> %x, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  %b = xor <8 x i64> %a, %y
+  %c = select <8 x i1> %m, <8 x i64> %b, <8 x i64> %y
+  ret <8 x i64> %c
+}

diff --git a/llvm/test/CodeGen/X86/avx512vl-logic.ll b/llvm/test/CodeGen/X86/avx512vl-logic.ll
index 26d905ebeae7..3f0ce3092847 100644
--- a/llvm/test/CodeGen/X86/avx512vl-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-logic.ll
@@ -1031,3 +1031,273 @@ define <4 x i64> @ternlog_xor_and_mask_ymm(<4 x i64> %x, <4 x i64> %y) {
   %b = xor <4 x i64> %a, %y
   ret <4 x i64> %b
 }
+
+define <4 x i32> @ternlog_maskz_or_and_mask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z, <4 x i32> %mask) {
+; KNL-LABEL: ternlog_maskz_or_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; KNL-NEXT:    vpcmpgtd %xmm3, %xmm2, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT:    vpord %xmm1, %xmm0, %xmm0 {%k1} {z}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskz_or_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovd2m %xmm3, %k1
+; SKX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; SKX-NEXT:    vorps %xmm1, %xmm0, %xmm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %m = icmp slt <4 x i32> %mask, zeroinitializer
+  %a = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
+  %b = or <4 x i32> %a, %y
+  %c = select <4 x i1> %m, <4 x i32> %b, <4 x i32> zeroinitializer
+  ret <4 x i32> %c
+}
+
+define <8 x i32> @ternlog_maskz_or_and_mask_ymm(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask) {
+; KNL-LABEL: ternlog_maskz_or_and_mask_ymm:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtd %ymm2, %ymm3, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT:    vpord %ymm1, %ymm0, %ymm0 {%k1} {z}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskz_or_and_mask_ymm:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovd2m %ymm2, %k1
+; SKX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; SKX-NEXT:    vorps %ymm1, %ymm0, %ymm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %m = icmp slt <8 x i32> %mask, zeroinitializer
+  %a = and <8 x i32> %x, <i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216>
+  %b = or <8 x i32> %a, %y
+  %c = select <8 x i1> %m, <8 x i32> %b, <8 x i32> zeroinitializer
+  ret <8 x i32> %c
+}
+
+define <2 x i64> @ternlog_maskz_xor_and_mask(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) {
+; KNL-LABEL: ternlog_maskz_xor_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtq %xmm2, %xmm3, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT:    vpxorq %xmm1, %xmm0, %xmm0 {%k1} {z}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskz_xor_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovq2m %xmm2, %k1
+; SKX-NEXT:    vandpd {{.*}}(%rip), %xmm0, %xmm0
+; SKX-NEXT:    vxorpd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %m = icmp slt <2 x i64> %mask, zeroinitializer
+  %a = and <2 x i64> %x, <i64 1099511627775, i64 1099511627775>
+  %b = xor <2 x i64> %a, %y
+  %c = select <2 x i1> %m, <2 x i64> %b, <2 x i64> zeroinitializer
+  ret <2 x i64> %c
+}
+
+define <4 x i64> @ternlog_maskz_xor_and_mask_ymm(<4 x i64> %x, <4 x i64> %y, <4 x i64> %mask) {
+; KNL-LABEL: ternlog_maskz_xor_and_mask_ymm:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtq %ymm2, %ymm3, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT:    vpxorq %ymm1, %ymm0, %ymm0 {%k1} {z}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskz_xor_and_mask_ymm:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovq2m %ymm2, %k1
+; SKX-NEXT:    vandpd {{.*}}(%rip), %ymm0, %ymm0
+; SKX-NEXT:    vxorpd %ymm1, %ymm0, %ymm0 {%k1} {z}
+; SKX-NEXT:    retq
+  %m = icmp slt <4 x i64> %mask, zeroinitializer
+  %a = and <4 x i64> %x, <i64 72057594037927935, i64 72057594037927935, i64 72057594037927935, i64 72057594037927935>
+  %b = xor <4 x i64> %a, %y
+  %c = select <4 x i1> %m, <4 x i64> %b, <4 x i64> zeroinitializer
+  ret <4 x i64> %c
+}
+
+define <4 x i32> @ternlog_maskx_or_and_mask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z, <4 x i32> %mask) {
+; KNL-LABEL: ternlog_maskx_or_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; KNL-NEXT:    vpcmpgtd %xmm3, %xmm2, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm2
+; KNL-NEXT:    vpord %xmm1, %xmm2, %xmm0 {%k1}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskx_or_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovd2m %xmm3, %k1
+; SKX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm2
+; SKX-NEXT:    vorps %xmm1, %xmm2, %xmm0 {%k1}
+; SKX-NEXT:    retq
+  %m = icmp slt <4 x i32> %mask, zeroinitializer
+  %a = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
+  %b = or <4 x i32> %a, %y
+  %c = select <4 x i1> %m, <4 x i32> %b, <4 x i32> %x
+  ret <4 x i32> %c
+}
+
+define <8 x i32> @ternlog_maskx_or_and_mask_ymm(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask) {
+; KNL-LABEL: ternlog_maskx_or_and_mask_ymm:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtd %ymm2, %ymm3, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm2
+; KNL-NEXT:    vpord %ymm1, %ymm2, %ymm0 {%k1}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskx_or_and_mask_ymm:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovd2m %ymm2, %k1
+; SKX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm2
+; SKX-NEXT:    vorps %ymm1, %ymm2, %ymm0 {%k1}
+; SKX-NEXT:    retq
+  %m = icmp slt <8 x i32> %mask, zeroinitializer
+  %a = and <8 x i32> %x, <i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216>
+  %b = or <8 x i32> %a, %y
+  %c = select <8 x i1> %m, <8 x i32> %b, <8 x i32> %x
+  ret <8 x i32> %c
+}
+
+define <2 x i64> @ternlog_maskx_xor_and_mask(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) {
+; KNL-LABEL: ternlog_maskx_xor_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtq %xmm2, %xmm3, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm2
+; KNL-NEXT:    vpxorq %xmm1, %xmm2, %xmm0 {%k1}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskx_xor_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovq2m %xmm2, %k1
+; SKX-NEXT:    vandpd {{.*}}(%rip), %xmm0, %xmm2
+; SKX-NEXT:    vxorpd %xmm1, %xmm2, %xmm0 {%k1}
+; SKX-NEXT:    retq
+  %m = icmp slt <2 x i64> %mask, zeroinitializer
+  %a = and <2 x i64> %x, <i64 1099511627775, i64 1099511627775>
+  %b = xor <2 x i64> %a, %y
+  %c = select <2 x i1> %m, <2 x i64> %b, <2 x i64> %x
+  ret <2 x i64> %c
+}
+
+define <4 x i64> @ternlog_maskx_xor_and_mask_ymm(<4 x i64> %x, <4 x i64> %y, <4 x i64> %mask) {
+; KNL-LABEL: ternlog_maskx_xor_and_mask_ymm:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtq %ymm2, %ymm3, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm2
+; KNL-NEXT:    vpxorq %ymm1, %ymm2, %ymm0 {%k1}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_maskx_xor_and_mask_ymm:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovq2m %ymm2, %k1
+; SKX-NEXT:    vandpd {{.*}}(%rip), %ymm0, %ymm2
+; SKX-NEXT:    vxorpd %ymm1, %ymm2, %ymm0 {%k1}
+; SKX-NEXT:    retq
+  %m = icmp slt <4 x i64> %mask, zeroinitializer
+  %a = and <4 x i64> %x, <i64 72057594037927935, i64 72057594037927935, i64 72057594037927935, i64 72057594037927935>
+  %b = xor <4 x i64> %a, %y
+  %c = select <4 x i1> %m, <4 x i64> %b, <4 x i64> %x
+  ret <4 x i64> %c
+}
+
+define <4 x i32> @ternlog_masky_or_and_mask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z, <4 x i32> %mask) {
+; KNL-LABEL: ternlog_masky_or_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; KNL-NEXT:    vpcmpgtd %xmm3, %xmm2, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT:    vpord %xmm1, %xmm0, %xmm1 {%k1}
+; KNL-NEXT:    vmovdqa %xmm1, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_masky_or_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovd2m %xmm3, %k1
+; SKX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; SKX-NEXT:    vorps %xmm1, %xmm0, %xmm1 {%k1}
+; SKX-NEXT:    vmovaps %xmm1, %xmm0
+; SKX-NEXT:    retq
+  %m = icmp slt <4 x i32> %mask, zeroinitializer
+  %a = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
+  %b = or <4 x i32> %a, %y
+  %c = select <4 x i1> %m, <4 x i32> %b, <4 x i32> %y
+  ret <4 x i32> %c
+}
+
+define <8 x i32> @ternlog_masky_or_and_mask_ymm(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask) {
+; KNL-LABEL: ternlog_masky_or_and_mask_ymm:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtd %ymm2, %ymm3, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm2
+; KNL-NEXT:    vpord %ymm1, %ymm2, %ymm0 {%k1}
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_masky_or_and_mask_ymm:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovd2m %ymm2, %k1
+; SKX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm2
+; SKX-NEXT:    vorps %ymm1, %ymm2, %ymm0 {%k1}
+; SKX-NEXT:    retq
+  %m = icmp slt <8 x i32> %mask, zeroinitializer
+  %a = and <8 x i32> %x, <i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216>
+  %b = or <8 x i32> %a, %y
+  %c = select <8 x i1> %m, <8 x i32> %b, <8 x i32> %x
+  ret <8 x i32> %c
+}
+
+define <2 x i64> @ternlog_masky_xor_and_mask(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) {
+; KNL-LABEL: ternlog_masky_xor_and_mask:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtq %xmm2, %xmm3, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT:    vpxorq %xmm1, %xmm0, %xmm1 {%k1}
+; KNL-NEXT:    vmovdqa %xmm1, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_masky_xor_and_mask:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovq2m %xmm2, %k1
+; SKX-NEXT:    vandpd {{.*}}(%rip), %xmm0, %xmm0
+; SKX-NEXT:    vxorpd %xmm1, %xmm0, %xmm1 {%k1}
+; SKX-NEXT:    vmovapd %xmm1, %xmm0
+; SKX-NEXT:    retq
+  %m = icmp slt <2 x i64> %mask, zeroinitializer
+  %a = and <2 x i64> %x, <i64 1099511627775, i64 1099511627775>
+  %b = xor <2 x i64> %a, %y
+  %c = select <2 x i1> %m, <2 x i64> %b, <2 x i64> %y
+  ret <2 x i64> %c
+}
+
+define <4 x i64> @ternlog_masky_xor_and_mask_ymm(<4 x i64> %x, <4 x i64> %y, <4 x i64> %mask) {
+; KNL-LABEL: ternlog_masky_xor_and_mask_ymm:
+; KNL:       ## %bb.0:
+; KNL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; KNL-NEXT:    vpcmpgtq %ymm2, %ymm3, %k1
+; KNL-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT:    vpxorq %ymm1, %ymm0, %ymm1 {%k1}
+; KNL-NEXT:    vmovdqa %ymm1, %ymm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: ternlog_masky_xor_and_mask_ymm:
+; SKX:       ## %bb.0:
+; SKX-NEXT:    vpmovq2m %ymm2, %k1
+; SKX-NEXT:    vandpd {{.*}}(%rip), %ymm0, %ymm0
+; SKX-NEXT:    vxorpd %ymm1, %ymm0, %ymm1 {%k1}
+; SKX-NEXT:    vmovapd %ymm1, %ymm0
+; SKX-NEXT:    retq
+  %m = icmp slt <4 x i64> %mask, zeroinitializer
+  %a = and <4 x i64> %x, <i64 72057594037927935, i64 72057594037927935, i64 72057594037927935, i64 72057594037927935>
+  %b = xor <4 x i64> %a, %y
+  %c = select <4 x i1> %m, <4 x i64> %b, <4 x i64> %y
+  ret <4 x i64> %c
+}
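
A note on maintaining tests like these: the CHECK lines are in the format
emitted by llvm/utils/update_llc_test_checks.py, so after a codegen change
they can be regenerated with that script (assuming llc is on PATH or passed
via --llc-binary) rather than edited by hand, for example:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/avx512-logic.ll \
      llvm/test/CodeGen/X86/avx512vl-logic.ll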


        

