[llvm] e95c119 - [X86] oddshuffles.ll - update check-prefixes

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 25 08:13:30 PST 2022


Author: Simon Pilgrim
Date: 2022-11-25T16:13:20Z
New Revision: e95c119c3f9430c23da3ce978b19841a24c9ec6a

URL: https://github.com/llvm/llvm-project/commit/e95c119c3f9430c23da3ce978b19841a24c9ec6a
DIFF: https://github.com/llvm/llvm-project/commit/e95c119c3f9430c23da3ce978b19841a24c9ec6a.diff

LOG: [X86] oddshuffles.ll - update check-prefixes

Share the common AVX check prefix with the XOP run as well as the AVX1/AVX2 runs.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/oddshuffles.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index 6ac734cad64c..4e7b011d11f5 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE42
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST,AVX2-FAST-ALL
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST,AVX2-FAST-PERLANE
-; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+xop | FileCheck %s --check-prefix=XOP
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+xop | FileCheck %s --check-prefixes=AVX,XOP
 
 define void @v3i64(<2 x i64> %a, <2 x i64> %b, ptr %p) nounwind {
 ; SSE2-LABEL: v3i64:
@@ -29,13 +29,6 @@ define void @v3i64(<2 x i64> %a, <2 x i64> %b, ptr %p) nounwind {
 ; AVX-NEXT:    vpextrq $1, %xmm0, 16(%rdi)
 ; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
 ; AVX-NEXT:    retq
-;
-; XOP-LABEL: v3i64:
-; XOP:       # %bb.0:
-; XOP-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm0[0],xmm1[0]
-; XOP-NEXT:    vpextrq $1, %xmm0, 16(%rdi)
-; XOP-NEXT:    vmovdqa %xmm1, (%rdi)
-; XOP-NEXT:    retq
   %r = shufflevector <2 x i64> %a, <2 x i64> %b, <3 x i32> <i32 0, i32 2, i32 1>
   store <3 x i64> %r, ptr %p
   ret void
@@ -54,13 +47,6 @@ define void @v3f64(<2 x double> %a, <2 x double> %b, ptr %p) nounwind {
 ; AVX-NEXT:    vmovhps %xmm0, 16(%rdi)
 ; AVX-NEXT:    vmovaps %xmm1, (%rdi)
 ; AVX-NEXT:    retq
-;
-; XOP-LABEL: v3f64:
-; XOP:       # %bb.0:
-; XOP-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm1[0]
-; XOP-NEXT:    vmovhps %xmm0, 16(%rdi)
-; XOP-NEXT:    vmovaps %xmm1, (%rdi)
-; XOP-NEXT:    retq
   %r = shufflevector <2 x double> %a, <2 x double> %b, <3 x i32> <i32 0, i32 2, i32 1>
   store <3 x double> %r, ptr %p
   ret void
@@ -88,13 +74,6 @@ define void @v3i32(<2 x i32> %a, <2 x i32> %b, ptr %p) nounwind {
 ; AVX-NEXT:    vextractps $1, %xmm0, 8(%rdi)
 ; AVX-NEXT:    vmovlps %xmm1, (%rdi)
 ; AVX-NEXT:    retq
-;
-; XOP-LABEL: v3i32:
-; XOP:       # %bb.0:
-; XOP-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; XOP-NEXT:    vextractps $1, %xmm0, 8(%rdi)
-; XOP-NEXT:    vmovlps %xmm1, (%rdi)
-; XOP-NEXT:    retq
   %r = shufflevector <2 x i32> %a, <2 x i32> %b, <3 x i32> <i32 0, i32 2, i32 1>
   store <3 x i32> %r, ptr %p
   ret void
@@ -125,14 +104,6 @@ define void @v5i16(<4 x i16> %a, <4 x i16> %b, ptr %p) nounwind {
 ; AVX-NEXT:    vpextrw $3, %xmm0, 8(%rdi)
 ; AVX-NEXT:    vmovq %xmm1, (%rdi)
 ; AVX-NEXT:    retq
-;
-; XOP-LABEL: v5i16:
-; XOP:       # %bb.0:
-; XOP-NEXT:    vpsrlq $16, %xmm1, %xmm1
-; XOP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; XOP-NEXT:    vpextrw $3, %xmm0, 8(%rdi)
-; XOP-NEXT:    vmovq %xmm1, (%rdi)
-; XOP-NEXT:    retq
   %r = shufflevector <4 x i16> %a, <4 x i16> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
   store <5 x i16> %r, ptr %p
   ret void
@@ -212,14 +183,6 @@ define void @v5f32(<4 x float> %a, <4 x float> %b, ptr %p) nounwind {
 ; AVX-NEXT:    vextractps $3, %xmm0, 16(%rdi)
 ; AVX-NEXT:    vmovaps %xmm1, (%rdi)
 ; AVX-NEXT:    retq
-;
-; XOP-LABEL: v5f32:
-; XOP:       # %bb.0:
-; XOP-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; XOP-NEXT:    vextractps $3, %xmm0, 16(%rdi)
-; XOP-NEXT:    vmovaps %xmm1, (%rdi)
-; XOP-NEXT:    retq
   %r = shufflevector <4 x float> %a, <4 x float> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
   store <5 x float> %r, ptr %p
   ret void
@@ -253,14 +216,23 @@ define void @v7i8(<4 x i8> %a, <4 x i8> %b, ptr %p) nounwind {
 ; SSE42-NEXT:    movd %xmm1, (%rdi)
 ; SSE42-NEXT:    retq
 ;
-; AVX-LABEL: v7i8:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,4,7,4,3,6,0,u,u,u,u,u,u,u,u,u]
-; AVX-NEXT:    vpextrb $0, %xmm1, 6(%rdi)
-; AVX-NEXT:    vpextrw $2, %xmm0, 4(%rdi)
-; AVX-NEXT:    vmovd %xmm0, (%rdi)
-; AVX-NEXT:    retq
+; AVX1-LABEL: v7i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,4,7,4,3,6,0,u,u,u,u,u,u,u,u,u]
+; AVX1-NEXT:    vpextrb $0, %xmm1, 6(%rdi)
+; AVX1-NEXT:    vpextrw $2, %xmm0, 4(%rdi)
+; AVX1-NEXT:    vmovd %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v7i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,4,7,4,3,6,0,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpextrb $0, %xmm1, 6(%rdi)
+; AVX2-NEXT:    vpextrw $2, %xmm0, 4(%rdi)
+; AVX2-NEXT:    vmovd %xmm0, (%rdi)
+; AVX2-NEXT:    retq
 ;
 ; XOP-LABEL: v7i8:
 ; XOP:       # %bb.0:
@@ -299,14 +271,23 @@ define void @v7i16(<4 x i16> %a, <4 x i16> %b, ptr %p) nounwind {
 ; SSE42-NEXT:    movq %xmm1, (%rdi)
 ; SSE42-NEXT:    retq
 ;
-; AVX-LABEL: v7i16:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,3,8,9,14,15,8,9,6,7,12,13,0,1,14,15]
-; AVX-NEXT:    vpextrw $0, %xmm1, 12(%rdi)
-; AVX-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
-; AVX-NEXT:    vmovq %xmm0, (%rdi)
-; AVX-NEXT:    retq
+; AVX1-LABEL: v7i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,3,8,9,14,15,8,9,6,7,12,13,0,1,14,15]
+; AVX1-NEXT:    vpextrw $0, %xmm1, 12(%rdi)
+; AVX1-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
+; AVX1-NEXT:    vmovq %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v7i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,3,8,9,14,15,8,9,6,7,12,13,0,1,14,15]
+; AVX2-NEXT:    vpextrw $0, %xmm1, 12(%rdi)
+; AVX2-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
+; AVX2-NEXT:    vmovq %xmm0, (%rdi)
+; AVX2-NEXT:    retq
 ;
 ; XOP-LABEL: v7i16:
 ; XOP:       # %bb.0:
@@ -357,17 +338,6 @@ define void @v7i32(<4 x i32> %a, <4 x i32> %b, ptr %p) nounwind {
 ; AVX-NEXT:    vmovlps %xmm0, 16(%rdi)
 ; AVX-NEXT:    vmovaps %xmm2, (%rdi)
 ; AVX-NEXT:    retq
-;
-; XOP-LABEL: v7i32:
-; XOP:       # %bb.0:
-; XOP-NEXT:    vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[0,2,3,2]
-; XOP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; XOP-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; XOP-NEXT:    vmovss %xmm1, 24(%rdi)
-; XOP-NEXT:    vmovlps %xmm0, 16(%rdi)
-; XOP-NEXT:    vmovaps %xmm2, (%rdi)
-; XOP-NEXT:    retq
   %r = shufflevector <4 x i32> %a, <4 x i32> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
   store <7 x i32> %r, ptr %p
   ret void
@@ -404,13 +374,21 @@ define void @v12i8(<8 x i8> %a, <8 x i8> %b, ptr %p) nounwind {
 ; SSE42-NEXT:    movq %xmm0, (%rdi)
 ; SSE42-NEXT:    retq
 ;
-; AVX-LABEL: v12i8:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
-; AVX-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
-; AVX-NEXT:    vmovq %xmm0, (%rdi)
-; AVX-NEXT:    retq
+; AVX1-LABEL: v12i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
+; AVX1-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
+; AVX1-NEXT:    vmovq %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v12i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
+; AVX2-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
+; AVX2-NEXT:    vmovq %xmm0, (%rdi)
+; AVX2-NEXT:    retq
 ;
 ; XOP-LABEL: v12i8:
 ; XOP:       # %bb.0:
@@ -663,14 +641,23 @@ define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, ptr%p) nounwind {
 ; SSE42-NEXT:    movq %xmm0, (%rdi)
 ; SSE42-NEXT:    retq
 ;
-; AVX-LABEL: pr29025:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
-; AVX-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
-; AVX-NEXT:    vmovq %xmm0, (%rdi)
-; AVX-NEXT:    retq
+; AVX1-LABEL: pr29025:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
+; AVX1-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
+; AVX1-NEXT:    vmovq %xmm0, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: pr29025:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
+; AVX2-NEXT:    vpextrd $2, %xmm0, 8(%rdi)
+; AVX2-NEXT:    vmovq %xmm0, (%rdi)
+; AVX2-NEXT:    retq
 ;
 ; XOP-LABEL: pr29025:
 ; XOP:       # %bb.0:
@@ -768,23 +755,41 @@ define void @interleave_24i8_out(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
 ; SSE42-NEXT:    movq %xmm0, (%rcx)
 ; SSE42-NEXT:    retq
 ;
-; AVX-LABEL: interleave_24i8_out:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqu (%rdi), %xmm0
-; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; AVX-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; AVX-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vmovq %xmm2, (%rsi)
-; AVX-NEXT:    vmovq %xmm3, (%rdx)
-; AVX-NEXT:    vmovq %xmm0, (%rcx)
-; AVX-NEXT:    retq
+; AVX1-LABEL: interleave_24i8_out:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqu (%rdi), %xmm0
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AVX1-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm2, (%rsi)
+; AVX1-NEXT:    vmovq %xmm3, (%rdx)
+; AVX1-NEXT:    vmovq %xmm0, (%rcx)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: interleave_24i8_out:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqu (%rdi), %xmm0
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm2, (%rsi)
+; AVX2-NEXT:    vmovq %xmm3, (%rdx)
+; AVX2-NEXT:    vmovq %xmm0, (%rcx)
+; AVX2-NEXT:    retq
 ;
 ; XOP-LABEL: interleave_24i8_out:
 ; XOP:       # %bb.0:
@@ -867,21 +872,37 @@ define void @interleave_24i8_in(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
 ; SSE42-NEXT:    movdqu %xmm3, (%rdi)
 ; SSE42-NEXT:    retq
 ;
-; AVX-LABEL: interleave_24i8_in:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[0,8],zero,xmm1[1,9],zero,xmm1[2,10],zero,xmm1[3,11],zero,xmm1[4,12],zero,xmm1[5]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm0[0],zero,zero,xmm0[1],zero,zero,xmm0[2],zero,zero,xmm0[3],zero,zero,xmm0[4],zero
-; AVX-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[13],zero,xmm1[6,14],zero,xmm1[7,15],zero,xmm1[u,u,u,u,u,u,u,u]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = zero,xmm0[5],zero,zero,xmm0[6],zero,zero,xmm0[7,u,u,u,u,u,u,u,u]
-; AVX-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vmovq %xmm0, 16(%rdi)
-; AVX-NEXT:    vmovdqu %xmm2, (%rdi)
-; AVX-NEXT:    retq
+; AVX1-LABEL: interleave_24i8_in:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[0,8],zero,xmm1[1,9],zero,xmm1[2,10],zero,xmm1[3,11],zero,xmm1[4,12],zero,xmm1[5]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm0[0],zero,zero,xmm0[1],zero,zero,xmm0[2],zero,zero,xmm0[3],zero,zero,xmm0[4],zero
+; AVX1-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[13],zero,xmm1[6,14],zero,xmm1[7,15],zero,xmm1[u,u,u,u,u,u,u,u]
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = zero,xmm0[5],zero,zero,xmm0[6],zero,zero,xmm0[7,u,u,u,u,u,u,u,u]
+; AVX1-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, 16(%rdi)
+; AVX1-NEXT:    vmovdqu %xmm2, (%rdi)
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: interleave_24i8_in:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[0,8],zero,xmm1[1,9],zero,xmm1[2,10],zero,xmm1[3,11],zero,xmm1[4,12],zero,xmm1[5]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm0[0],zero,zero,xmm0[1],zero,zero,xmm0[2],zero,zero,xmm0[3],zero,zero,xmm0[4],zero
+; AVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[13],zero,xmm1[6,14],zero,xmm1[7,15],zero,xmm1[u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = zero,xmm0[5],zero,zero,xmm0[6],zero,zero,xmm0[7,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, 16(%rdi)
+; AVX2-NEXT:    vmovdqu %xmm2, (%rdi)
+; AVX2-NEXT:    retq
 ;
 ; XOP-LABEL: interleave_24i8_in:
 ; XOP:       # %bb.0:
@@ -2355,15 +2376,6 @@ define void @PR41097() {
 ; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; AVX-NEXT:    vmovdqu %xmm0, (%rax)
 ; AVX-NEXT:    retq
-;
-; XOP-LABEL: PR41097:
-; XOP:       # %bb.0:
-; XOP-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; XOP-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; XOP-NEXT:    vpmovsxbd %xmm0, %xmm0
-; XOP-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; XOP-NEXT:    vmovdqu %xmm0, (%rax)
-; XOP-NEXT:    retq
   %wide.vec = load <6 x i8>, ptr undef, align 1
   %strided.vec = shufflevector <6 x i8> %wide.vec, <6 x i8> undef, <2 x i32> <i32 0, i32 3>
   %tmp = sext <2 x i8> %strided.vec to <2 x i32>

