[llvm] 59d48ea - [X86] Add test file that was supposed to go with D81327.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Jun 21 20:27:19 PDT 2020


Author: Craig Topper
Date: 2020-06-21T20:23:44-07:00
New Revision: 59d48eadd0ae164abc9953698eb0acc8af2f1a58

URL: https://github.com/llvm/llvm-project/commit/59d48eadd0ae164abc9953698eb0acc8af2f1a58
DIFF: https://github.com/llvm/llvm-project/commit/59d48eadd0ae164abc9953698eb0acc8af2f1a58.diff

LOG: [X86] Add test file that was supposed to go with D81327.

Must have forgotten to git add the file.

Added: 
    llvm/test/CodeGen/X86/avx512-movmsk.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/avx512-movmsk.ll b/llvm/test/CodeGen/X86/avx512-movmsk.ll
new file mode 100644
index 000000000000..1f7fc15fdee6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx512-movmsk.ll
@@ -0,0 +1,167 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl | FileCheck %s --check-prefixes=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl,avx512dq,avx512bw | FileCheck %s --check-prefixes=AVX512VLDQBW
+
+; This test makes sure we don't use movmsk instructions when masked compares
+; would be better. The use of the getmant intrinsic introduces a conversion
+; from scalar to vXi1 late, after movmsk has been formed, requiring it to be reversed.
+
+declare <2 x double> @llvm.x86.avx512.mask.getmant.pd.128(<2 x double>, i32, <2 x double>, i8)
+declare <4 x double> @llvm.x86.avx512.mask.getmant.pd.256(<4 x double>, i32, <4 x double>, i8)
+declare <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double>, i32, <8 x double>, i8, i32)
+declare <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float>, i32, <16 x float>, i16, i32)
+
+define <2 x double> @movmsk2(<2 x double> %x0, <2 x double> %x2, <2 x i64> %mask) {
+; AVX512VL-LABEL: movmsk2:
+; AVX512VL:       ## %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtq %xmm2, %xmm3, %k1
+; AVX512VL-NEXT:    vgetmantpd $11, %xmm0, %xmm1 {%k1}
+; AVX512VL-NEXT:    vmovapd %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512VLDQBW-LABEL: movmsk2:
+; AVX512VLDQBW:       ## %bb.0:
+; AVX512VLDQBW-NEXT:    vpmovq2m %xmm2, %k1
+; AVX512VLDQBW-NEXT:    vgetmantpd $11, %xmm0, %xmm1 {%k1}
+; AVX512VLDQBW-NEXT:    vmovapd %xmm1, %xmm0
+; AVX512VLDQBW-NEXT:    retq
+  %a = icmp slt <2 x i64> %mask, zeroinitializer
+  %b = bitcast <2 x i1> %a to i2
+  %c = zext i2 %b to i8
+  %res = call <2 x double> @llvm.x86.avx512.mask.getmant.pd.128(<2 x double> %x0, i32 11, <2 x double> %x2, i8 %c)
+  ret <2 x double> %res
+}
+
+define <4 x double> @movmsk4(<4 x double> %x0, <4 x double> %x2, <4 x i32> %mask) {
+; AVX512VL-LABEL: movmsk4:
+; AVX512VL:       ## %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT:    vgetmantpd $11, %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT:    vmovapd %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512VLDQBW-LABEL: movmsk4:
+; AVX512VLDQBW:       ## %bb.0:
+; AVX512VLDQBW-NEXT:    vpmovd2m %xmm2, %k1
+; AVX512VLDQBW-NEXT:    vgetmantpd $11, %ymm0, %ymm1 {%k1}
+; AVX512VLDQBW-NEXT:    vmovapd %ymm1, %ymm0
+; AVX512VLDQBW-NEXT:    retq
+  %a = icmp slt <4 x i32> %mask, zeroinitializer
+  %b = bitcast <4 x i1> %a to i4
+  %c = zext i4 %b to i8
+  %res = call <4 x double> @llvm.x86.avx512.mask.getmant.pd.256(<4 x double> %x0, i32 11, <4 x double> %x2, i8 %c)
+  ret <4 x double> %res
+}
+
+define <8 x double> @movmsk8(<8 x double> %x0, <8 x double> %x2, <8 x i32> %mask) {
+; AVX512VL-LABEL: movmsk8:
+; AVX512VL:       ## %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtd %ymm2, %ymm3, %k1
+; AVX512VL-NEXT:    vgetmantpd $11, %zmm0, %zmm1 {%k1}
+; AVX512VL-NEXT:    vmovapd %zmm1, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512VLDQBW-LABEL: movmsk8:
+; AVX512VLDQBW:       ## %bb.0:
+; AVX512VLDQBW-NEXT:    vpmovd2m %ymm2, %k1
+; AVX512VLDQBW-NEXT:    vgetmantpd $11, %zmm0, %zmm1 {%k1}
+; AVX512VLDQBW-NEXT:    vmovapd %zmm1, %zmm0
+; AVX512VLDQBW-NEXT:    retq
+  %a = icmp slt <8 x i32> %mask, zeroinitializer
+  %b = bitcast <8 x i1> %a to i8
+  %res = call <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double> %x0, i32 11, <8 x double> %x2, i8 %b, i32 4)
+  ret <8 x double> %res
+}
+
+define <16 x float> @movmsk16(<16 x float> %x0, <16 x float> %x2, <16 x i8> %mask) {
+; AVX512VL-LABEL: movmsk16:
+; AVX512VL:       ## %bb.0:
+; AVX512VL-NEXT:    vpmovmskb %xmm2, %eax
+; AVX512VL-NEXT:    kmovw %eax, %k1
+; AVX512VL-NEXT:    vgetmantps $11, %zmm0, %zmm1 {%k1}
+; AVX512VL-NEXT:    vmovaps %zmm1, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512VLDQBW-LABEL: movmsk16:
+; AVX512VLDQBW:       ## %bb.0:
+; AVX512VLDQBW-NEXT:    vpmovb2m %xmm2, %k1
+; AVX512VLDQBW-NEXT:    vgetmantps $11, %zmm0, %zmm1 {%k1}
+; AVX512VLDQBW-NEXT:    vmovaps %zmm1, %zmm0
+; AVX512VLDQBW-NEXT:    retq
+  %a = icmp slt <16 x i8> %mask, zeroinitializer
+  %b = bitcast <16 x i1> %a to i16
+  %res = call <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float> %x0, i32 11, <16 x float> %x2, i16 %b, i32 4)
+  ret <16 x float> %res
+}
+
+; Similar to above but with fp types bitcasted to int for the slt.
+define <2 x double> @movmsk2_fp(<2 x double> %x0, <2 x double> %x2, <2 x double> %mask) {
+; AVX512VL-LABEL: movmsk2_fp:
+; AVX512VL:       ## %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtq %xmm2, %xmm3, %k1
+; AVX512VL-NEXT:    vgetmantpd $11, %xmm0, %xmm1 {%k1}
+; AVX512VL-NEXT:    vmovapd %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512VLDQBW-LABEL: movmsk2_fp:
+; AVX512VLDQBW:       ## %bb.0:
+; AVX512VLDQBW-NEXT:    vpmovq2m %xmm2, %k1
+; AVX512VLDQBW-NEXT:    vgetmantpd $11, %xmm0, %xmm1 {%k1}
+; AVX512VLDQBW-NEXT:    vmovapd %xmm1, %xmm0
+; AVX512VLDQBW-NEXT:    retq
+  %q = bitcast <2 x double> %mask to <2 x i64>
+  %a = icmp slt <2 x i64> %q, zeroinitializer
+  %b = bitcast <2 x i1> %a to i2
+  %c = zext i2 %b to i8
+  %res = call <2 x double> @llvm.x86.avx512.mask.getmant.pd.128(<2 x double> %x0, i32 11, <2 x double> %x2, i8 %c)
+  ret <2 x double> %res
+}
+
+define <4 x double> @movmsk4_fp(<4 x double> %x0, <4 x double> %x2, <4 x float> %mask) {
+; AVX512VL-LABEL: movmsk4_fp:
+; AVX512VL:       ## %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT:    vgetmantpd $11, %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT:    vmovapd %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512VLDQBW-LABEL: movmsk4_fp:
+; AVX512VLDQBW:       ## %bb.0:
+; AVX512VLDQBW-NEXT:    vpmovd2m %xmm2, %k1
+; AVX512VLDQBW-NEXT:    vgetmantpd $11, %ymm0, %ymm1 {%k1}
+; AVX512VLDQBW-NEXT:    vmovapd %ymm1, %ymm0
+; AVX512VLDQBW-NEXT:    retq
+  %q = bitcast <4 x float> %mask to <4 x i32>
+  %a = icmp slt <4 x i32> %q, zeroinitializer
+  %b = bitcast <4 x i1> %a to i4
+  %c = zext i4 %b to i8
+  %res = call <4 x double> @llvm.x86.avx512.mask.getmant.pd.256(<4 x double> %x0, i32 11, <4 x double> %x2, i8 %c)
+  ret <4 x double> %res
+}
+
+define <8 x double> @movmsk8_fp(<8 x double> %x0, <8 x double> %x2, <8 x float> %mask) {
+; AVX512VL-LABEL: movmsk8_fp:
+; AVX512VL:       ## %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpcmpgtd %ymm2, %ymm3, %k1
+; AVX512VL-NEXT:    vgetmantpd $11, %zmm0, %zmm1 {%k1}
+; AVX512VL-NEXT:    vmovapd %zmm1, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512VLDQBW-LABEL: movmsk8_fp:
+; AVX512VLDQBW:       ## %bb.0:
+; AVX512VLDQBW-NEXT:    vpmovd2m %ymm2, %k1
+; AVX512VLDQBW-NEXT:    vgetmantpd $11, %zmm0, %zmm1 {%k1}
+; AVX512VLDQBW-NEXT:    vmovapd %zmm1, %zmm0
+; AVX512VLDQBW-NEXT:    retq
+  %q = bitcast <8 x float> %mask to <8 x i32>
+  %a = icmp slt <8 x i32> %q, zeroinitializer
+  %b = bitcast <8 x i1> %a to i8
+  %res = call <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double> %x0, i32 11, <8 x double> %x2, i8 %b, i32 4)
+  ret <8 x double> %res
+}


        


More information about the llvm-commits mailing list