[llvm] d32d65b - [X86] Regenerate x86-interleaved-access.ll with AVX1OR2 common check-prefix to reduce duplication

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 29 05:13:02 PDT 2022


Author: Simon Pilgrim
Date: 2022-03-29T12:24:46+01:00
New Revision: d32d65b9036ba126b2ed10b3c7194162102979ac

URL: https://github.com/llvm/llvm-project/commit/d32d65b9036ba126b2ed10b3c7194162102979ac
DIFF: https://github.com/llvm/llvm-project/commit/d32d65b9036ba126b2ed10b3c7194162102979ac.diff

LOG: [X86] Regenerate x86-interleaved-access.ll with AVX1OR2 common check-prefix to reduce duplication
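
The AVX1 and AVX2 RUN lines produce identical code for several of these tests, so the shared AVX1OR2 prefix lets update_llc_test_checks.py emit one common check block instead of duplicated AVX1/AVX2 blocks; FileCheck accepts a line under any of the prefixes listed in --check-prefixes, so prefix-specific checks remain available where the outputs diverge. A minimal sketch of the regeneration step (assuming a freshly built llc is on PATH; the UTC_ARGS recorded in the test's NOTE line are reapplied automatically):

    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/x86-interleaved-access.ll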

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/x86-interleaved-access.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index dce96d4941c0f..0c5feb06243b4 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_mem_shuffle
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2OR512,AVX2
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2OR512,AVX2
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx512f -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2OR512,AVX512
 
 define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
@@ -164,39 +164,22 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
 }
 
 define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x double> %v3) {
-; AVX1-LABEL: store_factorf64_4:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX1-NEXT:    vmovups %ymm0, 96(%rdi)
-; AVX1-NEXT:    vmovups %ymm3, 64(%rdi)
-; AVX1-NEXT:    vmovups %ymm4, 32(%rdi)
-; AVX1-NEXT:    vmovups %ymm2, (%rdi)
-; AVX1-NEXT:    vzeroupper
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: store_factorf64_4:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT:    vmovups %ymm0, 96(%rdi)
-; AVX2-NEXT:    vmovups %ymm3, 64(%rdi)
-; AVX2-NEXT:    vmovups %ymm4, 32(%rdi)
-; AVX2-NEXT:    vmovups %ymm2, (%rdi)
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: store_factorf64_4:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1OR2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1OR2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX1OR2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1OR2-NEXT:    vmovups %ymm0, 96(%rdi)
+; AVX1OR2-NEXT:    vmovups %ymm3, 64(%rdi)
+; AVX1OR2-NEXT:    vmovups %ymm4, 32(%rdi)
+; AVX1OR2-NEXT:    vmovups %ymm2, (%rdi)
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: store_factorf64_4:
 ; AVX512:       # %bb.0:
@@ -222,39 +205,22 @@ define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x doubl
 }
 
 define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <4 x i64> %v2, <4 x i64> %v3) {
-; AVX1-LABEL: store_factori64_4:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX1-NEXT:    vmovups %ymm0, 96(%rdi)
-; AVX1-NEXT:    vmovups %ymm3, 64(%rdi)
-; AVX1-NEXT:    vmovups %ymm4, 32(%rdi)
-; AVX1-NEXT:    vmovups %ymm2, (%rdi)
-; AVX1-NEXT:    vzeroupper
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: store_factori64_4:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT:    vmovups %ymm0, 96(%rdi)
-; AVX2-NEXT:    vmovups %ymm3, 64(%rdi)
-; AVX2-NEXT:    vmovups %ymm4, 32(%rdi)
-; AVX2-NEXT:    vmovups %ymm2, (%rdi)
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: store_factori64_4:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1OR2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1OR2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1OR2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX1OR2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1OR2-NEXT:    vmovups %ymm0, 96(%rdi)
+; AVX1OR2-NEXT:    vmovups %ymm3, 64(%rdi)
+; AVX1OR2-NEXT:    vmovups %ymm4, 32(%rdi)
+; AVX1OR2-NEXT:    vmovups %ymm2, (%rdi)
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: store_factori64_4:
 ; AVX512:       # %bb.0:
@@ -363,37 +329,21 @@ ret void
 }
 
 define void @interleaved_store_vf16_i8_stride4(<16 x i8> %x1, <16 x i8> %x2, <16 x i8> %x3, <16 x i8> %x4, <64 x i8>* %p) {
-; AVX1-LABEL: interleaved_store_vf16_i8_stride4:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX1-NEXT:    vmovdqa %xmm0, 48(%rdi)
-; AVX1-NEXT:    vmovdqa %xmm4, 32(%rdi)
-; AVX1-NEXT:    vmovdqa %xmm1, 16(%rdi)
-; AVX1-NEXT:    vmovdqa %xmm3, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: interleaved_store_vf16_i8_stride4:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX2-NEXT:    vmovdqa %xmm0, 48(%rdi)
-; AVX2-NEXT:    vmovdqa %xmm4, 32(%rdi)
-; AVX2-NEXT:    vmovdqa %xmm1, 16(%rdi)
-; AVX2-NEXT:    vmovdqa %xmm3, (%rdi)
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: interleaved_store_vf16_i8_stride4:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1OR2-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1OR2-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX1OR2-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX1OR2-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX1OR2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; AVX1OR2-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1OR2-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1OR2-NEXT:    vmovdqa %xmm0, 48(%rdi)
+; AVX1OR2-NEXT:    vmovdqa %xmm4, 32(%rdi)
+; AVX1OR2-NEXT:    vmovdqa %xmm1, 16(%rdi)
+; AVX1OR2-NEXT:    vmovdqa %xmm3, (%rdi)
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: interleaved_store_vf16_i8_stride4:
 ; AVX512:       # %bb.0:
@@ -419,55 +369,30 @@ ret void
 }
 
 define <8 x i8> @interleaved_load_vf8_i8_stride4(<32 x i8>* %ptr) {
-; AVX1-LABEL: interleaved_load_vf8_i8_stride4:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,u,5,u,9,u,13,u,13,u,5,u,12,u,13,u>
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm3
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm2
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0],xmm3[1],xmm1[2],xmm3[3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2],xmm3[3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
-; AVX1-NEXT:    vpackusdw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = <3,u,7,u,11,u,15,u,7,u,15,u,6,u,7,u>
-; AVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
-; AVX1-NEXT:    vpsrld $16, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vpmullw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: interleaved_load_vf8_i8_stride4:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,u,5,u,9,u,13,u,13,u,5,u,12,u,13,u>
-; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm3
-; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0],xmm3[1],xmm1[2],xmm3[3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
-; AVX2-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2],xmm3[3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
-; AVX2-NEXT:    vpackusdw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <3,u,7,u,11,u,15,u,7,u,15,u,6,u,7,u>
-; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm4
-; AVX2-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
-; AVX2-NEXT:    vpsrld $16, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddb %xmm0, %xmm3, %xmm0
-; AVX2-NEXT:    vpmullw %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: interleaved_load_vf8_i8_stride4:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1OR2-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX1OR2-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,u,5,u,9,u,13,u,13,u,5,u,12,u,13,u>
+; AVX1OR2-NEXT:    vpshufb %xmm2, %xmm1, %xmm3
+; AVX1OR2-NEXT:    vpshufb %xmm2, %xmm0, %xmm2
+; AVX1OR2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX1OR2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0],xmm3[1],xmm1[2],xmm3[3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
+; AVX1OR2-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2],xmm3[3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
+; AVX1OR2-NEXT:    vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; AVX1OR2-NEXT:    vmovdqa {{.*#+}} xmm3 = <3,u,7,u,11,u,15,u,7,u,15,u,6,u,7,u>
+; AVX1OR2-NEXT:    vpshufb %xmm3, %xmm1, %xmm4
+; AVX1OR2-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
+; AVX1OR2-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; AVX1OR2-NEXT:    vpsrld $16, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vpaddb %xmm0, %xmm3, %xmm0
+; AVX1OR2-NEXT:    vpmullw %xmm0, %xmm2, %xmm0
+; AVX1OR2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: interleaved_load_vf8_i8_stride4:
 ; AVX512:       # %bb.0:
@@ -1865,31 +1790,18 @@ define void @splat4_v8i32_load_store(<8 x i32>* %s, <32 x i32>* %d) {
 }
 
 define void @splat4_v4f64_load_store(<4 x double>* %s, <16 x double>* %d) {
-; AVX1-LABEL: splat4_v4f64_load_store:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vbroadcastsd (%rdi), %ymm0
-; AVX1-NEXT:    vbroadcastsd 16(%rdi), %ymm1
-; AVX1-NEXT:    vbroadcastsd 8(%rdi), %ymm2
-; AVX1-NEXT:    vbroadcastsd 24(%rdi), %ymm3
-; AVX1-NEXT:    vmovups %ymm3, 96(%rsi)
-; AVX1-NEXT:    vmovups %ymm1, 64(%rsi)
-; AVX1-NEXT:    vmovups %ymm2, 32(%rsi)
-; AVX1-NEXT:    vmovups %ymm0, (%rsi)
-; AVX1-NEXT:    vzeroupper
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: splat4_v4f64_load_store:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vbroadcastsd (%rdi), %ymm0
-; AVX2-NEXT:    vbroadcastsd 16(%rdi), %ymm1
-; AVX2-NEXT:    vbroadcastsd 8(%rdi), %ymm2
-; AVX2-NEXT:    vbroadcastsd 24(%rdi), %ymm3
-; AVX2-NEXT:    vmovups %ymm3, 96(%rsi)
-; AVX2-NEXT:    vmovups %ymm1, 64(%rsi)
-; AVX2-NEXT:    vmovups %ymm2, 32(%rsi)
-; AVX2-NEXT:    vmovups %ymm0, (%rsi)
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: splat4_v4f64_load_store:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vbroadcastsd (%rdi), %ymm0
+; AVX1OR2-NEXT:    vbroadcastsd 16(%rdi), %ymm1
+; AVX1OR2-NEXT:    vbroadcastsd 8(%rdi), %ymm2
+; AVX1OR2-NEXT:    vbroadcastsd 24(%rdi), %ymm3
+; AVX1OR2-NEXT:    vmovups %ymm3, 96(%rsi)
+; AVX1OR2-NEXT:    vmovups %ymm1, 64(%rsi)
+; AVX1OR2-NEXT:    vmovups %ymm2, 32(%rsi)
+; AVX1OR2-NEXT:    vmovups %ymm0, (%rsi)
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: splat4_v4f64_load_store:
 ; AVX512:       # %bb.0:
@@ -1912,31 +1824,18 @@ define void @splat4_v4f64_load_store(<4 x double>* %s, <16 x double>* %d) {
 }
 
 define void @splat4_v4i64_load_store(<4 x i64>* %s, <16 x i64>* %d) {
-; AVX1-LABEL: splat4_v4i64_load_store:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vbroadcastsd (%rdi), %ymm0
-; AVX1-NEXT:    vbroadcastsd 16(%rdi), %ymm1
-; AVX1-NEXT:    vbroadcastsd 8(%rdi), %ymm2
-; AVX1-NEXT:    vbroadcastsd 24(%rdi), %ymm3
-; AVX1-NEXT:    vmovups %ymm3, 96(%rsi)
-; AVX1-NEXT:    vmovups %ymm1, 64(%rsi)
-; AVX1-NEXT:    vmovups %ymm2, 32(%rsi)
-; AVX1-NEXT:    vmovups %ymm0, (%rsi)
-; AVX1-NEXT:    vzeroupper
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: splat4_v4i64_load_store:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vbroadcastsd (%rdi), %ymm0
-; AVX2-NEXT:    vbroadcastsd 16(%rdi), %ymm1
-; AVX2-NEXT:    vbroadcastsd 8(%rdi), %ymm2
-; AVX2-NEXT:    vbroadcastsd 24(%rdi), %ymm3
-; AVX2-NEXT:    vmovups %ymm3, 96(%rsi)
-; AVX2-NEXT:    vmovups %ymm1, 64(%rsi)
-; AVX2-NEXT:    vmovups %ymm2, 32(%rsi)
-; AVX2-NEXT:    vmovups %ymm0, (%rsi)
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: splat4_v4i64_load_store:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vbroadcastsd (%rdi), %ymm0
+; AVX1OR2-NEXT:    vbroadcastsd 16(%rdi), %ymm1
+; AVX1OR2-NEXT:    vbroadcastsd 8(%rdi), %ymm2
+; AVX1OR2-NEXT:    vbroadcastsd 24(%rdi), %ymm3
+; AVX1OR2-NEXT:    vmovups %ymm3, 96(%rsi)
+; AVX1OR2-NEXT:    vmovups %ymm1, 64(%rsi)
+; AVX1OR2-NEXT:    vmovups %ymm2, 32(%rsi)
+; AVX1OR2-NEXT:    vmovups %ymm0, (%rsi)
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: splat4_v4i64_load_store:
 ; AVX512:       # %bb.0:
