[llvm] [X86] lowerBuildVectorToBitOp - build_vector(trunc(x),trunc(y),...) -> trunc(build_vector(x,y,...)) (PR #88945)

via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 16 10:54:57 PDT 2024


llvmbot wrote:


@llvm/pr-subscribers-backend-x86

Author: Simon Pilgrim (RKSimon)

Changes:

Truncations of vectors don't always survive legalization, so use lowerBuildVectorToBitOp to recreate the vector truncation when we can.

Do we need an ISD::TRUNCATE_VECTOR_INREG node?
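
For context, here is a minimal, hypothetical .ll example of the pattern this fold targets (the function name and types are illustrative, not taken from the patch): every lane of the build_vector is a truncation of a uniformly-typed wide scalar, so the whole node can be rebuilt as a single vector truncation of a wide build_vector.

```llvm
; Hypothetical illustration: build_vector(trunc(a),trunc(b),trunc(c),trunc(d))
; can be lowered as trunc(build_vector(a,b,c,d)), i.e. one v4i64 -> v4i32
; vector truncation instead of four scalar truncs plus element inserts.
define <4 x i32> @build_of_truncs(i64 %a, i64 %b, i64 %c, i64 %d) {
  %ta = trunc i64 %a to i32
  %tb = trunc i64 %b to i32
  %tc = trunc i64 %c to i32
  %td = trunc i64 %d to i32
  %v0 = insertelement <4 x i32> poison, i32 %ta, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %tb, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %tc, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %td, i32 3
  ret <4 x i32> %v3
}
```

Note that the new ISD::TRUNCATE case in the diff below bails out on splats and on wide source vector types that aren't legal, so the v4i64 source in this sketch would only be rebuilt on targets where v4i64 is a legal type (e.g. with AVX).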

---

Patch is 33.15 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/88945.diff


3 Files Affected:

- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+13-1) 
- (modified) llvm/test/CodeGen/X86/setcc-wide-types.ll (+43-29) 
- (modified) llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll (+70-479) 


``````````diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f16a751a166d69..25d8d4ff38abcf 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -8310,7 +8310,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op, const SDLoc &DL,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
-  MVT VT = Op->getSimpleValueType(0);
+  EVT VT = Op->getValueType(0);
   unsigned NumElems = VT.getVectorNumElements();
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
@@ -8341,6 +8341,18 @@ static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op, const SDLoc &DL,
     if (!TLI.isOperationLegalOrPromote(Opcode, VT))
       return SDValue();
     break;
+  // TODO: Add ANY/SIGN/ZERO_EXTEND support.
+  case ISD::TRUNCATE: {
+    SmallVector<SDValue, 32> Elts;
+    for (SDValue Elt : Op->ops())
+      Elts.push_back(Elt.getOperand(0));
+    EVT SrcSVT = Elts[0].getValueType();
+    EVT SrcVT = VT.changeVectorElementType(SrcSVT);
+    if (Op->getSplatValue() || !TLI.isTypeLegal(SrcVT) ||
+        any_of(Elts, [&](SDValue Elt) { return Elt.getValueType() != SrcSVT; }))
+      return SDValue();
+    return DAG.getNode(Opcode, DL, VT, DAG.getBuildVector(SrcVT, DL, Elts));
+  }
   }
 
   SmallVector<SDValue, 4> LHSElts, RHSElts;
diff --git a/llvm/test/CodeGen/X86/setcc-wide-types.ll b/llvm/test/CodeGen/X86/setcc-wide-types.ll
index 5aa266db6553d3..c257c7f2abc12d 100644
--- a/llvm/test/CodeGen/X86/setcc-wide-types.ll
+++ b/llvm/test/CodeGen/X86/setcc-wide-types.ll
@@ -722,41 +722,55 @@ define i1 @ne_v4i256(<4 x i256> %a0) {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT:    vmovd %eax, %xmm0
-; AVX512-NEXT:    shrq $32, %rax
-; AVX512-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %r10
-; AVX512-NEXT:    vpinsrd $2, %r10d, %xmm0, %xmm0
-; AVX512-NEXT:    shrq $32, %r10
-; AVX512-NEXT:    vpinsrd $3, %r10d, %xmm0, %xmm0
-; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %r8
-; AVX512-NEXT:    vmovd %r8d, %xmm1
-; AVX512-NEXT:    shrq $32, %r8
-; AVX512-NEXT:    vpinsrd $1, %r8d, %xmm1, %xmm1
-; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %r9
-; AVX512-NEXT:    vpinsrd $2, %r9d, %xmm1, %xmm1
-; AVX512-NEXT:    shrq $32, %r9
-; AVX512-NEXT:    vpinsrd $3, %r9d, %xmm1, %xmm1
-; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %rdx
-; AVX512-NEXT:    vmovd %edx, %xmm1
-; AVX512-NEXT:    shrq $32, %rdx
-; AVX512-NEXT:    vpinsrd $1, %edx, %xmm1, %xmm1
 ; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %rcx
-; AVX512-NEXT:    vpinsrd $2, %ecx, %xmm1, %xmm1
+; AVX512-NEXT:    vmovq %rcx, %xmm0
 ; AVX512-NEXT:    shrq $32, %rcx
-; AVX512-NEXT:    vpinsrd $3, %ecx, %xmm1, %xmm1
-; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %rdi
-; AVX512-NEXT:    vmovd %edi, %xmm2
-; AVX512-NEXT:    shrq $32, %rdi
-; AVX512-NEXT:    vpinsrd $1, %edi, %xmm2, %xmm2
+; AVX512-NEXT:    vmovd %ecx, %xmm1
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT:    vmovq %rdx, %xmm1
+; AVX512-NEXT:    shrq $32, %rdx
+; AVX512-NEXT:    vmovd %edx, %xmm2
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %rsi
-; AVX512-NEXT:    vpinsrd $2, %esi, %xmm2, %xmm2
+; AVX512-NEXT:    vmovq %rsi, %xmm1
 ; AVX512-NEXT:    shrq $32, %rsi
-; AVX512-NEXT:    vpinsrd $3, %esi, %xmm2, %xmm2
+; AVX512-NEXT:    vmovd %esi, %xmm2
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %rdi
+; AVX512-NEXT:    vmovq %rdi, %xmm2
+; AVX512-NEXT:    shrq $32, %rdi
+; AVX512-NEXT:    vmovd %edi, %xmm3
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
 ; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT:    vpmovqd %zmm0, %ymm0
+; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT:    vmovq %r10, %xmm1
+; AVX512-NEXT:    shrq $32, %r10
+; AVX512-NEXT:    vmovd %r10d, %xmm2
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    vmovq %rax, %xmm2
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm3
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %r9
+; AVX512-NEXT:    vmovq %r9, %xmm2
+; AVX512-NEXT:    shrq $32, %r9
+; AVX512-NEXT:    vmovd %r9d, %xmm3
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX512-NEXT:    orq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT:    vmovq %r8, %xmm3
+; AVX512-NEXT:    shrq $32, %r8
+; AVX512-NEXT:    vmovd %r8d, %xmm4
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512-NEXT:    vpmovqd %zmm1, %ymm1
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; AVX512-NEXT:    kortestw %k0, %k0
 ; AVX512-NEXT:    sete %al
diff --git a/llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll b/llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll
index aea76f694a0fc2..26fe93cd6e0822 100644
--- a/llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll
+++ b/llvm/test/CodeGen/X86/shuffle-vs-trunc-128.ll
@@ -12,8 +12,8 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-ONLY
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-ONLY
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512VBMI,AVX512VBMI-FAST
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512VBMI,AVX512VBMI-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512VBMI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vbmi,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512VBMI
 
 ; PR31551
 ; Pairs of shufflevector:trunc functions with functional equivalence.
@@ -953,253 +953,52 @@ define <16 x i8> @evenelts_v32i16_trunc_v16i16_to_v16i8(<32 x i16> %n2) nounwind
 ;
 ; AVX1-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    pushq %rbp
-; AVX1-NEXT:    pushq %r14
-; AVX1-NEXT:    pushq %rbx
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpextrw $6, %xmm2, %eax
-; AVX1-NEXT:    vpextrw $4, %xmm2, %ecx
-; AVX1-NEXT:    vpextrw $2, %xmm2, %edx
-; AVX1-NEXT:    vmovd %xmm2, %esi
-; AVX1-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX1-NEXT:    vpextrw $4, %xmm1, %r8d
-; AVX1-NEXT:    vpextrw $2, %xmm1, %r9d
-; AVX1-NEXT:    vmovd %xmm1, %r10d
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm1[0,1,2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT:    vandps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpackusdw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vandps %ymm3, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrw $6, %xmm1, %r11d
-; AVX1-NEXT:    vpextrw $4, %xmm1, %ebx
-; AVX1-NEXT:    vpextrw $2, %xmm1, %ebp
-; AVX1-NEXT:    vmovd %xmm1, %r14d
-; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX1-NEXT:    vpinsrb $4, %r14d, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $5, %ebp, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $6, %ebx, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $7, %r11d, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $9, %r9d, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $10, %r8d, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT:    popq %rbx
-; AVX1-NEXT:    popq %r14
-; AVX1-NEXT:    popq %rbp
+; AVX1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    pushq %rbp
-; AVX2-NEXT:    pushq %r14
-; AVX2-NEXT:    pushq %rbx
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vpextrw $6, %xmm2, %eax
-; AVX2-NEXT:    vpextrw $4, %xmm2, %ecx
-; AVX2-NEXT:    vpextrw $2, %xmm2, %edx
-; AVX2-NEXT:    vmovd %xmm2, %esi
-; AVX2-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX2-NEXT:    vpextrw $4, %xmm1, %r8d
-; AVX2-NEXT:    vpextrw $2, %xmm1, %r9d
-; AVX2-NEXT:    vmovd %xmm1, %r10d
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpextrw $6, %xmm1, %r11d
-; AVX2-NEXT:    vpextrw $4, %xmm1, %ebx
-; AVX2-NEXT:    vpextrw $2, %xmm1, %ebp
-; AVX2-NEXT:    vmovd %xmm1, %r14d
-; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-NEXT:    vpinsrb $4, %r14d, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $5, %ebp, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $6, %ebx, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $7, %r11d, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $9, %r9d, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $10, %r8d, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT:    popq %rbx
-; AVX2-NEXT:    popq %r14
-; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
-; AVX512F-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    pushq %rbp
-; AVX512F-NEXT:    pushq %rbx
-; AVX512F-NEXT:    vpmovdb %zmm0, %xmm1
-; AVX512F-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
-; AVX512F-NEXT:    vpextrw $6, %xmm2, %eax
-; AVX512F-NEXT:    vpextrw $4, %xmm2, %ecx
-; AVX512F-NEXT:    vpextrw $2, %xmm2, %edx
-; AVX512F-NEXT:    vmovd %xmm2, %esi
-; AVX512F-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
-; AVX512F-NEXT:    vpextrw $6, %xmm2, %edi
-; AVX512F-NEXT:    vpextrw $4, %xmm2, %r8d
-; AVX512F-NEXT:    vpextrw $2, %xmm2, %r9d
-; AVX512F-NEXT:    vmovd %xmm2, %r10d
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512F-NEXT:    vpextrw $6, %xmm0, %r11d
-; AVX512F-NEXT:    vpextrw $4, %xmm0, %ebx
-; AVX512F-NEXT:    vpextrw $2, %xmm0, %ebp
-; AVX512F-NEXT:    vpinsrb $5, %ebp, %xmm1, %xmm0
-; AVX512F-NEXT:    vpinsrb $6, %ebx, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $7, %r11d, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $9, %r9d, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $10, %r8d, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX512F-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512F-NEXT:    popq %rbx
-; AVX512F-NEXT:    popq %rbp
-; AVX512F-NEXT:    vzeroupper
-; AVX512F-NEXT:    retq
-;
-; AVX512VL-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    pushq %rbp
-; AVX512VL-NEXT:    pushq %r14
-; AVX512VL-NEXT:    pushq %rbx
-; AVX512VL-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
-; AVX512VL-NEXT:    vpextrw $6, %xmm1, %eax
-; AVX512VL-NEXT:    vpextrw $4, %xmm1, %ecx
-; AVX512VL-NEXT:    vpextrw $2, %xmm1, %edx
-; AVX512VL-NEXT:    vmovd %xmm1, %esi
-; AVX512VL-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
-; AVX512VL-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512VL-NEXT:    vpextrw $4, %xmm1, %r8d
-; AVX512VL-NEXT:    vpextrw $2, %xmm1, %r9d
-; AVX512VL-NEXT:    vmovd %xmm1, %r10d
-; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT:    vpextrw $6, %xmm1, %r11d
-; AVX512VL-NEXT:    vpextrw $4, %xmm1, %ebx
-; AVX512VL-NEXT:    vpextrw $2, %xmm1, %ebp
-; AVX512VL-NEXT:    vmovd %xmm1, %r14d
-; AVX512VL-NEXT:    vpmovdb %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $4, %r14d, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $5, %ebp, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $6, %ebx, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $7, %r11d, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $9, %r9d, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $10, %r8d, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX512VL-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT:    popq %rbx
-; AVX512VL-NEXT:    popq %r14
-; AVX512VL-NEXT:    popq %rbp
-; AVX512VL-NEXT:    vzeroupper
-; AVX512VL-NEXT:    retq
-;
-; AVX512BW-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    pushq %rbp
-; AVX512BW-NEXT:    pushq %rbx
-; AVX512BW-NEXT:    vpmovdb %zmm0, %xmm1
-; AVX512BW-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
-; AVX512BW-NEXT:    vpextrw $6, %xmm2, %eax
-; AVX512BW-NEXT:    vpextrw $4, %xmm2, %ecx
-; AVX512BW-NEXT:    vpextrw $2, %xmm2, %edx
-; AVX512BW-NEXT:    vmovd %xmm2, %esi
-; AVX512BW-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BW-NEXT:    vpextrw $6, %xmm2, %edi
-; AVX512BW-NEXT:    vpextrw $4, %xmm2, %r8d
-; AVX512BW-NEXT:    vpextrw $2, %xmm2, %r9d
-; AVX512BW-NEXT:    vmovd %xmm2, %r10d
-; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512BW-NEXT:    vpextrw $6, %xmm0, %r11d
-; AVX512BW-NEXT:    vpextrw $4, %xmm0, %ebx
-; AVX512BW-NEXT:    vpextrw $2, %xmm0, %ebp
-; AVX512BW-NEXT:    vpinsrb $5, %ebp, %xmm1, %xmm0
-; AVX512BW-NEXT:    vpinsrb $6, %ebx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $7, %r11d, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $9, %r9d, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $10, %r8d, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT:    popq %rbx
-; AVX512BW-NEXT:    popq %rbp
-; AVX512BW-NEXT:    vzeroupper
-; AVX512BW-NEXT:    retq
-;
-; AVX512BWVL-ONLY-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
-; AVX512BWVL-ONLY:       # %bb.0:
-; AVX512BWVL-ONLY-NEXT:    pushq %rbp
-; AVX512BWVL-ONLY-NEXT:    pushq %r14
-; AVX512BWVL-ONLY-NEXT:    pushq %rbx
-; AVX512BWVL-ONLY-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
-; AVX512BWVL-ONLY-NEXT:    vpextrw $6, %xmm1, %eax
-; AVX512BWVL-ONLY-NEXT:    vpextrw $4, %xmm1, %ecx
-; AVX512BWVL-ONLY-NEXT:    vpextrw $2, %xmm1, %edx
-; AVX512BWVL-ONLY-NEXT:    vmovd %xmm1, %esi
-; AVX512BWVL-ONLY-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
-; AVX512BWVL-ONLY-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512BWVL-ONLY-NEXT:    vpextrw $4, %xmm1, %r8d
-; AVX512BWVL-ONLY-NEXT:    vpextrw $2, %xmm1, %r9d
-; AVX512BWVL-ONLY-NEXT:    vmovd %xmm1, %r10d
-; AVX512BWVL-ONLY-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-ONLY-NEXT:    vpextrw $6, %xmm1, %r11d
-; AVX512BWVL-ONLY-NEXT:    vpextrw $4, %xmm1, %ebx
-; AVX512BWVL-ONLY-NEXT:    vpextrw $2, %xmm1, %ebp
-; AVX512BWVL-ONLY-NEXT:    vmovd %xmm1, %r14d
-; AVX512BWVL-ONLY-NEXT:    vpmovdb %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $4, %r14d, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $5, %ebp, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $6, %ebx, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $7, %r11d, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $8, %r10d, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $9, %r9d, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $10, %r8d, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $12, %esi, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512BWVL-ONLY-NEXT:    popq %rbx
-; AVX512BWVL-ONLY-NEXT:    popq %r14
-; AVX512BWVL-ONLY-NEXT:    popq %rbp
-; AVX512BWVL-ONLY-NEXT:    vzeroupper
-; AVX512BWVL-ONLY-NEXT:    retq
-;
-; AVX512VBMI-FAST-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
-; AVX512VBMI-FAST:       # %bb.0:
-; AVX512VBMI-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,79]
-; AVX512VBMI-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512VBMI-FAST-NEXT:    vpermi2b %zmm2, %zmm0, %zmm1
-; AVX512VBMI-FAST-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
-; AVX512VBMI-FAST-NEXT:    vpextrw $6, %xmm0, %eax
-; AVX512VBMI-FAST-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
-; AVX512VBMI-FAST-NEXT:    vzeroupper
-; AVX512VBMI-FAST-NEXT:    retq
-;
-; AVX512VBMI-SLOW-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
-; AVX512VBMI-SLOW:       # %bb.0:
-; AVX512VBMI-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,4,8,12,16,20,24,28,32,36,40,44,48,77,78,79]
-; AVX512VBMI-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512VBMI-SLOW-NEXT:    vpermi2b %zmm2, %zmm0, %zmm1
-; AVX512VBMI-SLOW-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
-; AVX512VBMI-SLOW-NEXT:    vpextrw $6, %xmm0, %eax
-; AVX512VBMI-SLOW-NEXT:    vpextrw $4, %xmm0, %ecx
-; AVX512VBMI-SLOW-NEXT:    vpextrw $2, %xmm0, %edx
-; AVX512VBMI-SLOW-NEXT:    vpinsrb $13, %edx, %xmm1, %xmm0
-; AVX512VBMI-SLOW-NEXT:    vpinsrb $14, %ecx, %xmm0, %xmm0
-; AVX512VBMI-SLOW-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512VBMI-SLOW-NEXT:    vzeroupper
-; AVX512VBMI-SLOW-NEXT:    retq
+; AVX512-LABEL: evenelts_v32i16_trunc_v16i16_to_v16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
+; AVX512-NEXT:    vextracti32x4 $2, %zmm0, %xmm3
+; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
   %n0 = shufflevector <32 x i16> %n2, <32 x i16> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, ...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/88945


More information about the llvm-commits mailing list