[llvm] r281157 - [AVX-512] Add test cases to demonstrate opportunities for commuting vpternlog. Commuting will be added in a future commit.
Craig Topper via llvm-commits
llvm-commits@lists.llvm.org
Sat Sep 10 22:33:43 PDT 2016
Author: ctopper
Date: Sun Sep 11 00:33:43 2016
New Revision: 281157
URL: http://llvm.org/viewvc/llvm-project?rev=281157&view=rev
Log:
[AVX-512] Add test cases to demonstrate opportunities for commuting vpternlog. Commuting will be added in a future commit.
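For context on why commuting is possible at all: VPTERNLOG's 8-bit immediate is a truth table indexed by the three source bits (the first source selects bit 2 of the index, the second bit 1, the third bit 0), so reordering the sources only requires permuting the bits of the immediate. A minimal sketch in plain C of that permutation (the helper name is made up for illustration and is not LLVM code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper (not part of LLVM): given a VPTERNLOG truth-table
 * immediate, compute the immediate needed after reordering the three
 * sources so that the same boolean function is produced.  The truth-table
 * index is (op0 << 2) | (op1 << 1) | op2, and perm[p] names which original
 * operand ends up in new position p. */
static uint8_t commute_ternlog_imm(uint8_t imm, const int perm[3]) {
  uint8_t out = 0;
  for (int idx = 0; idx < 8; ++idx) {
    /* One index bit per original operand. */
    int bit[3] = { (idx >> 2) & 1, (idx >> 1) & 1, idx & 1 };
    /* Rebuild the index in the new operand order. */
    int newidx = (bit[perm[0]] << 2) | (bit[perm[1]] << 1) | bit[perm[2]];
    if (imm & (1u << idx))
      out |= (uint8_t)(1u << newidx);
  }
  return out;
}

int main(void) {
  /* Swapping the first two sources (the "102" order used in the tests
   * below) maps the $33 (0x21) immediate to 0x09. */
  const int perm102[3] = { 1, 0, 2 };
  printf("0x%02x -> 0x%02x\n", 0x21u,
         (unsigned)commute_ternlog_imm(0x21, perm102));
  return 0;
}

With that rewrite, a case like vpternlog_v16i32_102 below could presumably compute directly into %zmm0 with a $9 immediate and drop the trailing vmovdqa64 copy, and the various load cases could fold the memory operand once it is moved into the third source position; the exact output is up to the follow-up commit mentioned above.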
Added:
llvm/trunk/test/CodeGen/X86/avx512-vpternlog-commute.ll
Added: llvm/trunk/test/CodeGen/X86/avx512-vpternlog-commute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vpternlog-commute.ll?rev=281157&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vpternlog-commute.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx512-vpternlog-commute.ll Sun Sep 11 00:33:43 2016
@@ -0,0 +1,529 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
+
+; These test cases demonstrate cases where vpternlog could benefit from being commuted.
+
+declare <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i32, i16)
+declare <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i32, i16)
+
+define <16 x i32> @vpternlog_v16i32_012(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_012:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_102:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm0, %zmm1
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_210:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_012_load0:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm0, %zmm2
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_012_load1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
+; CHECK-LABEL: vpternlog_v16i32_012_load2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_102_load0:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_102_load1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm0, %zmm2
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
+; CHECK-LABEL: vpternlog_v16i32_102_load2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_210_load0:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_210_load1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm2, %zmm1
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
+; CHECK-LABEL: vpternlog_v16i32_210_load2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_021_load0:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
+; CHECK-LABEL: vpternlog_v16i32_021_load1:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
+; CHECK-LABEL: vpternlog_v16i32_021_load2:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 -1)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load0_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load1_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load2_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_load0_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_load1_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_load2_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_load0_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_load1_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm2, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_load2_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_load0_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_load1_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_load2_mask:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpternlogd $33, %zmm2, %zmm0, %zmm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load0_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm0, %zmm2 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load1_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm2, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_012_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_012_load2_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_load0_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm2, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_load1_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm0, %zmm2 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_102_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_102_load2_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_load0_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_load1_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm2, %zmm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_210_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_210_load2_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_load0_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm0, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %x0 = load <16 x i32>, <16 x i32>* %x0ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_load1_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x1 = load <16 x i32>, <16 x i32>* %x1ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}
+
+define <16 x i32> @vpternlog_v16i32_021_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
+; CHECK-LABEL: vpternlog_v16i32_021_load2_maskz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
+; CHECK-NEXT: vpternlogd $33, %zmm1, %zmm2, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+ %x2 = load <16 x i32>, <16 x i32>* %x2ptr
+ %res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 33, i16 %mask)
+ ret <16 x i32> %res
+}