[llvm] 7ae071f - Add more tests for promoting `blendw` -> `blendd`; NFC
Noah Goldstein via llvm-commits
llvm-commits at lists.llvm.org
Sun Feb 26 10:11:58 PST 2023
Author: Noah Goldstein
Date: 2023-02-26T12:11:16-06:00
New Revision: 7ae071f98186751627effeede9beba9df78f04ea
URL: https://github.com/llvm/llvm-project/commit/7ae071f98186751627effeede9beba9df78f04ea
DIFF: https://github.com/llvm/llvm-project/commit/7ae071f98186751627effeede9beba9df78f04ea.diff
LOG: Add more tests for promoting `blendw` -> `blendd`; NFC
Reviewed By: RKSimon
Differential Revision: https://reviews.llvm.org/D143788
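Background for the tests below: `pblendw`/`vpblendw` blend individual 16-bit lanes of two vectors under an immediate mask, while `pblendd`/`vpblendd`/`blendps` blend 32-bit lanes. When every pair of adjacent words in the mask comes from the same source, the word blend covers whole dwords and can be promoted to a dword blend, which can execute on more ports on several Intel cores. A minimal .ll sketch of the promotable shape (illustrative only; the function name and mask are not taken from this commit):

  define <8 x i16> @promotable_example(<8 x i16> %a, <8 x i16> %b) {
    ; Words (0,1) and (4,5) come from %a, (2,3) and (6,7) from %b, so the
    ; word-blend immediate 0xCC selects the same bytes as a dword blend
    ; with immediate 0xA; with AVX2 this may lower to vpblendd.
    %s = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 4, i32 5, i32 14, i32 15>
    ret <8 x i16> %s
  }

The `_fail_` tests in the diff deliberately break this pairing (e.g. taking word 3 of a dword from a different source than word 2), so the promotion must not fire there.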
Added:
llvm/test/CodeGen/X86/shuffle-blendw.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/shuffle-blendw.ll b/llvm/test/CodeGen/X86/shuffle-blendw.ll
new file mode 100644
index 000000000000..b5ce5ee584a4
--- /dev/null
+++ b/llvm/test/CodeGen/X86/shuffle-blendw.ll
@@ -0,0 +1,265 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86-SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64-SSE41
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X86-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X86-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64-AVX2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefix=X86-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefix=X64-AVX512
+
+define <16 x i16> @blendw_to_blendd_32(<16 x i16> %x, <16 x i16> %y, <16 x i16> %z) nounwind {
+; X86-SSE41-LABEL: blendw_to_blendd_32:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: pushl %ebp
+; X86-SSE41-NEXT: movl %esp, %ebp
+; X86-SSE41-NEXT: andl $-16, %esp
+; X86-SSE41-NEXT: subl $16, %esp
+; X86-SSE41-NEXT: paddw 40(%ebp), %xmm1
+; X86-SSE41-NEXT: paddw 24(%ebp), %xmm0
+; X86-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X86-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],mem[2,3],xmm1[4,5],mem[6,7]
+; X86-SSE41-NEXT: movl %ebp, %esp
+; X86-SSE41-NEXT: popl %ebp
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE41-LABEL: blendw_to_blendd_32:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: paddw %xmm5, %xmm1
+; X64-SSE41-NEXT: paddw %xmm4, %xmm0
+; X64-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X64-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; X64-SSE41-NEXT: retq
+;
+; X86-AVX-LABEL: blendw_to_blendd_32:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
+; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm4
+; X86-AVX-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; X86-AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X86-AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; X86-AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; X86-AVX-NEXT: retl
+;
+; X64-AVX-LABEL: blendw_to_blendd_32:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
+; X64-AVX-NEXT: vextractf128 $1, %ymm0, %xmm4
+; X64-AVX-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; X64-AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X64-AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; X64-AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; X64-AVX-NEXT: retq
+;
+; X86-AVX2-LABEL: blendw_to_blendd_32:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; X86-AVX2-NEXT: retl
+;
+; X64-AVX2-LABEL: blendw_to_blendd_32:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; X64-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; X64-AVX2-NEXT: retq
+;
+; X86-AVX512-LABEL: blendw_to_blendd_32:
+; X86-AVX512: # %bb.0:
+; X86-AVX512-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; X86-AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7],ymm0[8,9],ymm1[10,11],ymm0[12,13],ymm1[14,15]
+; X86-AVX512-NEXT: retl
+;
+; X64-AVX512-LABEL: blendw_to_blendd_32:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; X64-AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7],ymm0[8,9],ymm1[10,11],ymm0[12,13],ymm1[14,15]
+; X64-AVX512-NEXT: retq
+ %x1 = add <16 x i16> %x, %z
+ %shuffle = shufflevector <16 x i16> %x1, <16 x i16> %y, <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 4, i32 5, i32 22, i32 23, i32 8, i32 9, i32 26, i32 27, i32 12, i32 13, i32 30, i32 31>
+ ret <16 x i16> %shuffle
+}
+
+define <8 x i16> @blendw_to_blendd_16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %z) nounwind {
+; X86-SSE41-LABEL: blendw_to_blendd_16:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: paddw %xmm2, %xmm0
+; X86-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE41-LABEL: blendw_to_blendd_16:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: paddw %xmm2, %xmm0
+; X64-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X64-SSE41-NEXT: retq
+;
+; X86-AVX-LABEL: blendw_to_blendd_16:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X86-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X86-AVX-NEXT: retl
+;
+; X64-AVX-LABEL: blendw_to_blendd_16:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X64-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX-NEXT: retq
+;
+; X86-AVX2-LABEL: blendw_to_blendd_16:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X86-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; X86-AVX2-NEXT: retl
+;
+; X64-AVX2-LABEL: blendw_to_blendd_16:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; X64-AVX2-NEXT: retq
+;
+; X86-AVX512-LABEL: blendw_to_blendd_16:
+; X86-AVX512: # %bb.0:
+; X86-AVX512-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X86-AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X86-AVX512-NEXT: retl
+;
+; X64-AVX512-LABEL: blendw_to_blendd_16:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X64-AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX512-NEXT: retq
+ %x1 = add <8 x i16> %x, %z
+ %shuffle = shufflevector <8 x i16> %x1, <8 x i16> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i16> %shuffle
+}
+
+define <16 x i16> @blendw_to_blendd_fail_32(<16 x i16> %x, <16 x i16> %y, <16 x i16> %z) nounwind {
+; X86-SSE41-LABEL: blendw_to_blendd_fail_32:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: pushl %ebp
+; X86-SSE41-NEXT: movl %esp, %ebp
+; X86-SSE41-NEXT: andl $-16, %esp
+; X86-SSE41-NEXT: subl $16, %esp
+; X86-SSE41-NEXT: paddw 40(%ebp), %xmm1
+; X86-SSE41-NEXT: paddw 24(%ebp), %xmm0
+; X86-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; X86-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2],mem[3],xmm1[4,5],mem[6,7]
+; X86-SSE41-NEXT: movl %ebp, %esp
+; X86-SSE41-NEXT: popl %ebp
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE41-LABEL: blendw_to_blendd_fail_32:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: paddw %xmm5, %xmm1
+; X64-SSE41-NEXT: paddw %xmm4, %xmm0
+; X64-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; X64-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[3],xmm1[4,5],xmm3[6,7]
+; X64-SSE41-NEXT: retq
+;
+; X86-AVX-LABEL: blendw_to_blendd_fail_32:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
+; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm4
+; X86-AVX-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; X86-AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X86-AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; X86-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [65535,65535,65535,0,65535,65535,0,0,65535,65535,65535,0,65535,65535,0,0]
+; X86-AVX-NEXT: # ymm2 = mem[0,1,0,1]
+; X86-AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: vandnps %ymm1, %ymm2, %ymm1
+; X86-AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: retl
+;
+; X64-AVX-LABEL: blendw_to_blendd_fail_32:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
+; X64-AVX-NEXT: vextractf128 $1, %ymm0, %xmm4
+; X64-AVX-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; X64-AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X64-AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm2 = [65535,65535,65535,0,65535,65535,0,0,65535,65535,65535,0,65535,65535,0,0]
+; X64-AVX-NEXT: # ymm2 = mem[0,1,0,1]
+; X64-AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
+; X64-AVX-NEXT: vandnps %ymm1, %ymm2, %ymm1
+; X64-AVX-NEXT: vorps %ymm1, %ymm0, %ymm0
+; X64-AVX-NEXT: retq
+;
+; X86-AVX2-LABEL: blendw_to_blendd_fail_32:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6,7],ymm0[8,9,10],ymm1[11],ymm0[12,13],ymm1[14,15]
+; X86-AVX2-NEXT: retl
+;
+; X64-AVX2-LABEL: blendw_to_blendd_fail_32:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; X64-AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6,7],ymm0[8,9,10],ymm1[11],ymm0[12,13],ymm1[14,15]
+; X64-AVX2-NEXT: retq
+;
+; X86-AVX512-LABEL: blendw_to_blendd_fail_32:
+; X86-AVX512: # %bb.0:
+; X86-AVX512-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; X86-AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6,7],ymm0[8,9,10],ymm1[11],ymm0[12,13],ymm1[14,15]
+; X86-AVX512-NEXT: retl
+;
+; X64-AVX512-LABEL: blendw_to_blendd_fail_32:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vpaddw %ymm2, %ymm0, %ymm0
+; X64-AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6,7],ymm0[8,9,10],ymm1[11],ymm0[12,13],ymm1[14,15]
+; X64-AVX512-NEXT: retq
+ %x1 = add <16 x i16> %x, %z
+ %shuffle = shufflevector <16 x i16> %x1, <16 x i16> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 4, i32 5, i32 22, i32 23, i32 8, i32 9, i32 10, i32 27, i32 12, i32 13, i32 30, i32 31>
+ ret <16 x i16> %shuffle
+}
+
+define <8 x i16> @blendw_to_blendd_fail_16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %z) nounwind {
+; X86-SSE41-LABEL: blendw_to_blendd_fail_16:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: paddw %xmm2, %xmm0
+; X86-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE41-LABEL: blendw_to_blendd_fail_16:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: paddw %xmm2, %xmm0
+; X64-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X64-SSE41-NEXT: retq
+;
+; X86-AVX-LABEL: blendw_to_blendd_fail_16:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X86-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X86-AVX-NEXT: retl
+;
+; X64-AVX-LABEL: blendw_to_blendd_fail_16:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X64-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X64-AVX-NEXT: retq
+;
+; X86-AVX2-LABEL: blendw_to_blendd_fail_16:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X86-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X86-AVX2-NEXT: retl
+;
+; X64-AVX2-LABEL: blendw_to_blendd_fail_16:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X64-AVX2-NEXT: retq
+;
+; X86-AVX512-LABEL: blendw_to_blendd_fail_16:
+; X86-AVX512: # %bb.0:
+; X86-AVX512-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X86-AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X86-AVX512-NEXT: retl
+;
+; X64-AVX512-LABEL: blendw_to_blendd_fail_16:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; X64-AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; X64-AVX512-NEXT: retq
+ %x1 = add <8 x i16> %x, %z
+ %shuffle = shufflevector <8 x i16> %x1, <8 x i16> %y, <8 x i32> <i32 8, i32 1, i32 10, i32 11, i32 4, i32 5, i32 14, i32 15>
+ ret <8 x i16> %shuffle
+}
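The assertions in this file are autogenerated (note the NOTE line at the top of the file). After a codegen change they can be refreshed with the update script, assuming `llc` is on PATH (otherwise pass --llc-binary):

  python3 llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/shuffle-blendw.ll

The test itself can then be run through lit, e.g. (paths assumed relative to an llvm-project checkout with a build directory named `build`):

  ./build/bin/llvm-lit llvm/test/CodeGen/X86/shuffle-blendw.ll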