[llvm] r346903 - [X86] Update masked expandload/compressstore test names
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 14 14:44:08 PST 2018
Author: rksimon
Date: Wed Nov 14 14:44:08 2018
New Revision: 346903
URL: http://llvm.org/viewvc/llvm-project?rev=346903&view=rev
Log:
[X86] Update masked expandload/compressstore test names
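For context, these tests exercise the llvm.masked.expandload and llvm.masked.compressstore intrinsics, and the new names encode the data vector type plus how the mask is produced (a constant mask, an <N x i1> argument, or a compare of an integer trigger vector). A minimal sketch of the two operations, not part of this commit: the example function names are illustrative only, the v4f32 expandload declaration is taken from the test file itself, and the compressstore declaration is assumed to follow the LangRef form.

declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
declare void @llvm.masked.compressstore.v4f32(<4 x float>, float*, <4 x i1>)

define <4 x float> @expandload_v4f32_sketch(float* %base, <4 x i1> %mask, <4 x float> %src0) {
  ; Loads consecutive elements from %base into the result lanes whose mask
  ; bit is set; lanes with a clear bit keep the corresponding %src0 element.
  %res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> %mask, <4 x float> %src0)
  ret <4 x float> %res
}

define void @compressstore_v4f32_sketch(float* %base, <4 x float> %V, <4 x i1> %mask) {
  ; Packs the enabled lanes of %V together and stores them to consecutive
  ; memory locations starting at %base.
  call void @llvm.masked.compressstore.v4f32(<4 x float> %V, float* %base, <4 x i1> %mask)
  ret void
}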
Modified:
llvm/trunk/test/CodeGen/X86/compress_expand.ll
Modified: llvm/trunk/test/CodeGen/X86/compress_expand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/compress_expand.ll?rev=346903&r1=346902&r2=346903&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/compress_expand.ll (original)
+++ llvm/trunk/test/CodeGen/X86/compress_expand.ll Wed Nov 14 14:44:08 2018
@@ -5,17 +5,15 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-
-
-define <16 x float> @test1(float* %base) {
-; SKX-LABEL: test1:
+define <16 x float> @expandload_v16f32_const_undef(float* %base) {
+; SKX-LABEL: expandload_v16f32_const_undef:
; SKX: # %bb.0:
; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
; SKX-NEXT: retq
;
-; KNL-LABEL: test1:
+; KNL-LABEL: expandload_v16f32_const_undef:
; KNL: # %bb.0:
; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
; KNL-NEXT: kmovw %eax, %k1
@@ -25,15 +23,15 @@ define <16 x float> @test1(float* %base)
ret <16 x float>%res
}
-define <16 x float> @test2(float* %base, <16 x float> %src0) {
-; SKX-LABEL: test2:
+define <16 x float> @expandload_v16f32_const(float* %base, <16 x float> %src0) {
+; SKX-LABEL: expandload_v16f32_const:
; SKX: # %bb.0:
; SKX-NEXT: movw $30719, %ax # imm = 0x77FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; SKX-NEXT: retq
;
-; KNL-LABEL: test2:
+; KNL-LABEL: expandload_v16f32_const:
; KNL: # %bb.0:
; KNL-NEXT: movw $30719, %ax # imm = 0x77FF
; KNL-NEXT: kmovw %eax, %k1
@@ -43,15 +41,15 @@ define <16 x float> @test2(float* %base,
ret <16 x float>%res
}
-define <8 x double> @test3(double* %base, <8 x double> %src0, <8 x i1> %mask) {
-; SKX-LABEL: test3:
+define <8 x double> @expandload_v8f64_v8i1(double* %base, <8 x double> %src0, <8 x i1> %mask) {
+; SKX-LABEL: expandload_v8f64_v8i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
; SKX-NEXT: retq
;
-; KNL-LABEL: test3:
+; KNL-LABEL: expandload_v8f64_v8i1:
; KNL: # %bb.0:
; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
@@ -62,15 +60,15 @@ define <8 x double> @test3(double* %base
ret <8 x double>%res
}
-define <4 x float> @test4(float* %base, <4 x float> %src0) {
-; SKX-LABEL: test4:
+define <4 x float> @expandload_v4f32_const(float* %base, <4 x float> %src0) {
+; SKX-LABEL: expandload_v4f32_const:
; SKX: # %bb.0:
; SKX-NEXT: movb $7, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
-; KNL-LABEL: test4:
+; KNL-LABEL: expandload_v4f32_const:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: movw $7, %ax
@@ -82,15 +80,15 @@ define <4 x float> @test4(float* %base,
ret <4 x float>%res
}
-define <2 x i64> @test5(i64* %base, <2 x i64> %src0) {
-; SKX-LABEL: test5:
+define <2 x i64> @expandload_v2i64_const(i64* %base, <2 x i64> %src0) {
+; SKX-LABEL: expandload_v2i64_const:
; SKX: # %bb.0:
; SKX-NEXT: movb $2, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandq (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
-; KNL-LABEL: test5:
+; KNL-LABEL: expandload_v2i64_const:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: movb $2, %al
@@ -107,8 +105,8 @@ declare <8 x double> @llvm.masked.expand
declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
declare <2 x i64> @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
-define void @test6(float* %base, <16 x float> %V) {
-; SKX-LABEL: test6:
+define void @compressstore_v16f32_const(float* %base, <16 x float> %V) {
+; SKX-LABEL: compressstore_v16f32_const:
; SKX: # %bb.0:
; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
; SKX-NEXT: kmovd %eax, %k1
@@ -116,7 +114,7 @@ define void @test6(float* %base, <16 x f
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
-; KNL-LABEL: test6:
+; KNL-LABEL: compressstore_v16f32_const:
; KNL: # %bb.0:
; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
; KNL-NEXT: kmovw %eax, %k1
@@ -126,8 +124,8 @@ define void @test6(float* %base, <16 x f
ret void
}
-define void @test7(float* %base, <8 x float> %V, <8 x i1> %mask) {
-; SKX-LABEL: test7:
+define void @compressstore_v8f32_v8i1(float* %base, <8 x float> %V, <8 x i1> %mask) {
+; SKX-LABEL: compressstore_v8f32_v8i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
@@ -135,7 +133,7 @@ define void @test7(float* %base, <8 x fl
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
-; KNL-LABEL: test7:
+; KNL-LABEL: compressstore_v8f32_v8i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
@@ -147,8 +145,8 @@ define void @test7(float* %base, <8 x fl
ret void
}
-define void @test8(double* %base, <8 x double> %V, <8 x i1> %mask) {
-; SKX-LABEL: test8:
+define void @compressstore_v8f64_v8i1(double* %base, <8 x double> %V, <8 x i1> %mask) {
+; SKX-LABEL: compressstore_v8f64_v8i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
@@ -156,7 +154,7 @@ define void @test8(double* %base, <8 x d
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
-; KNL-LABEL: test8:
+; KNL-LABEL: compressstore_v8f64_v8i1:
; KNL: # %bb.0:
; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
@@ -167,8 +165,8 @@ define void @test8(double* %base, <8 x d
ret void
}
-define void @test9(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
-; SKX-LABEL: test9:
+define void @compressstore_v8i64_v8i1(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
+; SKX-LABEL: compressstore_v8i64_v8i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
@@ -176,7 +174,7 @@ define void @test9(i64* %base, <8 x i64>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
-; KNL-LABEL: test9:
+; KNL-LABEL: compressstore_v8i64_v8i1:
; KNL: # %bb.0:
; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
@@ -187,8 +185,8 @@ define void @test9(i64* %base, <8 x i64>
ret void
}
-define void @test10(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
-; SKX-LABEL: test10:
+define void @compressstore_v4i64_v4i1(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
+; SKX-LABEL: compressstore_v4i64_v4i1:
; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vpmovd2m %xmm1, %k1
@@ -196,7 +194,7 @@ define void @test10(i64* %base, <4 x i64
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
-; KNL-LABEL: test10:
+; KNL-LABEL: compressstore_v4i64_v4i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
@@ -209,15 +207,15 @@ define void @test10(i64* %base, <4 x i64
ret void
}
-define void @test11(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
-; SKX-LABEL: test11:
+define void @compressstore_v2i64_v2i1(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
+; SKX-LABEL: compressstore_v2i64_v2i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX-NEXT: vpmovq2m %xmm1, %k1
; SKX-NEXT: vpcompressq %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
-; KNL-LABEL: test11:
+; KNL-LABEL: compressstore_v2i64_v2i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpsllq $63, %xmm1, %xmm1
@@ -230,15 +228,15 @@ define void @test11(i64* %base, <2 x i64
ret void
}
-define void @test12(float* %base, <4 x float> %V, <4 x i1> %mask) {
-; SKX-LABEL: test12:
+define void @compressstore_v4f32_v4i1(float* %base, <4 x float> %V, <4 x i1> %mask) {
+; SKX-LABEL: compressstore_v4f32_v4i1:
; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vpmovd2m %xmm1, %k1
; SKX-NEXT: vcompressps %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
-; KNL-LABEL: test12:
+; KNL-LABEL: compressstore_v4f32_v4i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
@@ -251,8 +249,8 @@ define void @test12(float* %base, <4 x f
ret void
}
-define <2 x float> @test13(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
-; SKX-LABEL: test13:
+define <2 x float> @expandload_v2f32_v2i1(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
+; SKX-LABEL: expandload_v2f32_v2i1:
; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
@@ -260,7 +258,7 @@ define <2 x float> @test13(float* %base,
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
-; KNL-LABEL: test13:
+; KNL-LABEL: expandload_v2f32_v2i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -276,8 +274,8 @@ define <2 x float> @test13(float* %base,
ret <2 x float> %res
}
-define void @test14(float* %base, <2 x float> %V, <2 x i32> %trigger) {
-; SKX-LABEL: test14:
+define void @compressstore_v2f32_v2i32(float* %base, <2 x float> %V, <2 x i32> %trigger) {
+; SKX-LABEL: compressstore_v2f32_v2i32:
; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
@@ -285,7 +283,7 @@ define void @test14(float* %base, <2 x f
; SKX-NEXT: vcompressps %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
-; KNL-LABEL: test14:
+; KNL-LABEL: compressstore_v2f32_v2i32:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
@@ -300,8 +298,8 @@ define void @test14(float* %base, <2 x f
ret void
}
-define <32 x float> @test15(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
-; ALL-LABEL: test15:
+define <32 x float> @expandload_v32f32_v32i32(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
+; ALL-LABEL: expandload_v32f32_v32i32:
; ALL: # %bb.0:
; ALL-NEXT: vptestnmd %zmm3, %zmm3, %k1
; ALL-NEXT: vptestnmd %zmm2, %zmm2, %k2
@@ -315,8 +313,8 @@ define <32 x float> @test15(float* %base
ret <32 x float> %res
}
-define <16 x double> @test16(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
-; SKX-LABEL: test16:
+define <16 x double> @expandload_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
+; SKX-LABEL: expandload_v16f64_v16i32:
; SKX: # %bb.0:
; SKX-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; SKX-NEXT: vptestnmd %ymm3, %ymm3, %k1
@@ -327,7 +325,7 @@ define <16 x double> @test16(double* %ba
; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k2}
; SKX-NEXT: retq
;
-; KNL-LABEL: test16:
+; KNL-LABEL: expandload_v16f64_v16i32:
; KNL: # %bb.0:
; KNL-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; KNL-NEXT: vptestnmd %zmm3, %zmm3, %k1
@@ -343,8 +341,8 @@ define <16 x double> @test16(double* %ba
ret <16 x double> %res
}
-define void @test17(float* %base, <32 x float> %V, <32 x i32> %trigger) {
-; SKX-LABEL: test17:
+define void @compressstore_v32f32_v32i32(float* %base, <32 x float> %V, <32 x i32> %trigger) {
+; SKX-LABEL: compressstore_v32f32_v32i32:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmd %zmm3, %zmm3, %k1
; SKX-NEXT: vptestnmd %zmm2, %zmm2, %k2
@@ -355,7 +353,7 @@ define void @test17(float* %base, <32 x
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
-; KNL-LABEL: test17:
+; KNL-LABEL: compressstore_v32f32_v32i32:
; KNL: # %bb.0:
; KNL-NEXT: vptestnmd %zmm3, %zmm3, %k1
; KNL-NEXT: vptestnmd %zmm2, %zmm2, %k2
@@ -369,8 +367,8 @@ define void @test17(float* %base, <32 x
ret void
}
-define void @test18(double* %base, <16 x double> %V, <16 x i1> %mask) {
-; SKX-LABEL: test18:
+define void @compressstore_v16f64_v16i1(double* %base, <16 x double> %V, <16 x i1> %mask) {
+; SKX-LABEL: compressstore_v16f64_v16i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2
; SKX-NEXT: vpmovb2m %xmm2, %k1
@@ -382,7 +380,7 @@ define void @test18(double* %base, <16 x
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
-; KNL-LABEL: test18:
+; KNL-LABEL: compressstore_v16f64_v16i1:
; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; KNL-NEXT: vpslld $31, %zmm2, %zmm2