[llvm] [X86] combineConcatVectorOps - convert X86ISD::PACKSS/US concatenation to use combineConcatVectorOps recursion (PR #130575)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 10 03:38:37 PDT 2025


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/130575

From c97c117fba719487a60f24a998216cd1005e3ecc Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Mon, 10 Mar 2025 10:31:29 +0000
Subject: [PATCH] [X86] combineConcatVectorOps - convert X86ISD::PACKSS/US
 concatenation to use combineConcatVectorOps recursion

Only concatenate X86ISD::PACKSS/US nodes if concatenating at least one of the operands is itself beneficial; otherwise keep the narrower PACK nodes.
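
As a minimal sketch of the pattern (restating the X86ISelLowering.cpp hunk
below with explanatory comments; CombineSubOperand and ConcatSubOperand are
the existing combineConcatVectorOps helpers, where CombineSubOperand is
assumed to return a null SDValue when concatenating that operand is not
profitable, while ConcatSubOperand concatenates unconditionally):

    // Ask the profitability-aware helper for each PACK operand first.
    SDValue Concat0 = CombineSubOperand(SrcVT, Ops, 0);
    SDValue Concat1 = CombineSubOperand(SrcVT, Ops, 1);
    // Fold only if at least one operand was worth concatenating; the
    // remaining operand is then concatenated unconditionally so both
    // inputs of the widened PACKSS/PACKUS node are available.
    if (Concat0 || Concat1)
      return DAG.getNode(Op0.getOpcode(), DL, VT,
                         Concat0 ? Concat0 : ConcatSubOperand(SrcVT, Ops, 0),
                         Concat1 ? Concat1 : ConcatSubOperand(SrcVT, Ops, 1));
    // Otherwise fall through and keep the two narrower PACK nodes as-is.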
---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  10 +-
 llvm/test/CodeGen/X86/vector-pack-512.ll      |  42 +--
 .../X86/vector-shuffle-combining-avx2.ll      |  14 +-
 llvm/test/CodeGen/X86/widen_bitcnt.ll         | 292 +++++++++---------
 4 files changed, 179 insertions(+), 179 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d83033d24bdbb..1f0b6d6495e67 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -58431,9 +58431,13 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
         MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
         SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                                  NumOps * SrcVT.getVectorNumElements());
-        return DAG.getNode(Op0.getOpcode(), DL, VT,
-                           ConcatSubOperand(SrcVT, Ops, 0),
-                           ConcatSubOperand(SrcVT, Ops, 1));
+        SDValue Concat0 = CombineSubOperand(SrcVT, Ops, 0);
+        SDValue Concat1 = CombineSubOperand(SrcVT, Ops, 1);
+        if (Concat0 || Concat1)
+          return DAG.getNode(
+              Op0.getOpcode(), DL, VT,
+              Concat0 ? Concat0 : ConcatSubOperand(SrcVT, Ops, 0),
+              Concat1 ? Concat1 : ConcatSubOperand(SrcVT, Ops, 1));
       }
       break;
     case X86ISD::PALIGNR:
diff --git a/llvm/test/CodeGen/X86/vector-pack-512.ll b/llvm/test/CodeGen/X86/vector-pack-512.ll
index dc60bfdca53b2..30e61a68bb22f 100644
--- a/llvm/test/CodeGen/X86/vector-pack-512.ll
+++ b/llvm/test/CodeGen/X86/vector-pack-512.ll
@@ -245,21 +245,12 @@ define <64 x i8> @concat_trunc_packuswb_512(<32 x i16> %a0, <32 x i16> %a1) noun
 }
 
 define <32 x i16> @concat_packsswd_int_2x256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
-; AVX512F-LABEL: concat_packsswd_int_2x256:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vpackssdw %ymm3, %ymm2, %ymm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: concat_packsswd_int_2x256:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpackssdw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: concat_packsswd_int_2x256:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpackssdw %ymm3, %ymm2, %ymm1
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
   %lo = tail call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1)
   %hi = tail call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a2, <8 x i32> %a3)
   %res = shufflevector <16 x i16> %lo, <16 x i16> %hi, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -268,21 +259,12 @@ define <32 x i16> @concat_packsswd_int_2x256(<8 x i32> %a0, <8 x i32> %a1, <8 x
 declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>)
 
 define <32 x i16> @concat_packuswd_int_2x256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
-; AVX512F-LABEL: concat_packuswd_int_2x256:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpackusdw %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vpackusdw %ymm3, %ymm2, %ymm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: concat_packuswd_int_2x256:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpackusdw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: concat_packuswd_int_2x256:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpackusdw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpackusdw %ymm3, %ymm2, %ymm1
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
   %lo = tail call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1)
   %hi = tail call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a2, <8 x i32> %a3)
   %res = shufflevector <16 x i16> %lo, <16 x i16> %hi, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index 26d45993f7b8a..a23fd640c07a2 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -791,15 +791,13 @@ define <32 x i8> @concat_alignr_unnecessary(<16 x i8> %a0, <16 x i8> noundef %a1
   ret <32 x i8> %res
 }
 
-; TODO: Not beneficial to concatenate both inputs just to create a 256-bit packss
-define <32 x i8> @concat_packsr_unnecessary(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) nounwind {
-; CHECK-LABEL: concat_packsr_unnecessary:
+; Not beneficial to concatenate both inputs just to create a 256-bit packss
+define <32 x i8> @concat_packss_unnecessary(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) nounwind {
+; CHECK-LABEL: concat_packss_unnecessary:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; CHECK-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; CHECK-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpacksswb %xmm1, %xmm0, %xmm1
+; CHECK-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; CHECK-NEXT:    ret{{[l|q]}}
   %lo = tail call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
   %hi = tail call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a2)
diff --git a/llvm/test/CodeGen/X86/widen_bitcnt.ll b/llvm/test/CodeGen/X86/widen_bitcnt.ll
index cca9d4aa2a9f0..efe177963ff49 100644
--- a/llvm/test/CodeGen/X86/widen_bitcnt.ll
+++ b/llvm/test/CodeGen/X86/widen_bitcnt.ll
@@ -241,78 +241,86 @@ define <8 x i32> @widen_ctpop_v2i32_v8i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32
 ;
 ; AVX2-LABEL: widen_ctpop_v2i32_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrlw $4, %xmm0, %xmm4
-; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX2-NEXT:    vpand %xmm5, %xmm4, %xmm4
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %xmm4, %xmm0, %xmm5
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX2-NEXT:    vpshufb %xmm4, %xmm6, %xmm4
-; AVX2-NEXT:    vpand %xmm5, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
+; AVX2-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm4, %xmm0, %xmm0
 ; AVX2-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX2-NEXT:    vpsrlw $4, %xmm1, %xmm7
-; AVX2-NEXT:    vpand %xmm5, %xmm7, %xmm7
+; AVX2-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX2-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX2-NEXT:    vpsadbw %xmm5, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm4, %xmm1, %xmm7
 ; AVX2-NEXT:    vpshufb %xmm7, %xmm6, %xmm7
-; AVX2-NEXT:    vpand %xmm5, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm4, %xmm1, %xmm1
 ; AVX2-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
-; AVX2-NEXT:    vpsrlw $4, %xmm2, %xmm8
-; AVX2-NEXT:    vpand %xmm5, %xmm8, %xmm8
-; AVX2-NEXT:    vpshufb %xmm8, %xmm6, %xmm8
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm8[0]
-; AVX2-NEXT:    vpand %xmm5, %xmm2, %xmm2
+; AVX2-NEXT:    vpaddb %xmm7, %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpsadbw %xmm5, %xmm1, %xmm1
+; AVX2-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm4, %xmm2, %xmm1
+; AVX2-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX2-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX2-NEXT:    vpand %xmm4, %xmm2, %xmm2
 ; AVX2-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT:    vpaddb %xmm0, %xmm4, %xmm0
-; AVX2-NEXT:    vpsrlw $4, %xmm3, %xmm2
-; AVX2-NEXT:    vpand %xmm5, %xmm2, %xmm2
+; AVX2-NEXT:    vpaddb %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpsadbw %xmm5, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm4, %xmm3, %xmm2
 ; AVX2-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm2[0]
-; AVX2-NEXT:    vpand %xmm5, %xmm3, %xmm3
+; AVX2-NEXT:    vpsrlw $4, %xmm3, %xmm3
+; AVX2-NEXT:    vpand %xmm4, %xmm3, %xmm3
 ; AVX2-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX2-NEXT:    vpaddb %xmm1, %xmm2, %xmm1
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpsadbw %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT:    vpsadbw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpsadbw %xmm5, %xmm2, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: widen_ctpop_v2i32_v8i32:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpsrlw $4, %xmm0, %xmm4
-; AVX512VL-NEXT:    vpbroadcastb {{.*#+}} xmm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT:    vpand %xmm5, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpbroadcastb {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %xmm4, %xmm0, %xmm5
 ; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX512VL-NEXT:    vpshufb %xmm4, %xmm6, %xmm4
-; AVX512VL-NEXT:    vpand %xmm5, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
+; AVX512VL-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpand %xmm4, %xmm0, %xmm0
 ; AVX512VL-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512VL-NEXT:    vpsrlw $4, %xmm1, %xmm7
-; AVX512VL-NEXT:    vpand %xmm5, %xmm7, %xmm7
+; AVX512VL-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512VL-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX512VL-NEXT:    vpsadbw %xmm5, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpand %xmm4, %xmm1, %xmm7
 ; AVX512VL-NEXT:    vpshufb %xmm7, %xmm6, %xmm7
-; AVX512VL-NEXT:    vpand %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpand %xmm4, %xmm1, %xmm1
 ; AVX512VL-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
-; AVX512VL-NEXT:    vpsrlw $4, %xmm2, %xmm8
-; AVX512VL-NEXT:    vpand %xmm5, %xmm8, %xmm8
-; AVX512VL-NEXT:    vpshufb %xmm8, %xmm6, %xmm8
-; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm8[0]
-; AVX512VL-NEXT:    vpand %xmm5, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpaddb %xmm7, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX512VL-NEXT:    vpsadbw %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpand %xmm4, %xmm2, %xmm1
+; AVX512VL-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512VL-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpand %xmm4, %xmm2, %xmm2
 ; AVX512VL-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX512VL-NEXT:    vpaddb %xmm0, %xmm4, %xmm0
-; AVX512VL-NEXT:    vpsrlw $4, %xmm3, %xmm2
-; AVX512VL-NEXT:    vpand %xmm5, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpaddb %xmm1, %xmm2, %xmm1
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX512VL-NEXT:    vpsadbw %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpand %xmm4, %xmm3, %xmm2
 ; AVX512VL-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm2[0]
-; AVX512VL-NEXT:    vpand %xmm5, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpsrlw $4, %xmm3, %xmm3
+; AVX512VL-NEXT:    vpand %xmm4, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
-; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX512VL-NEXT:    vpaddb %xmm1, %xmm2, %xmm1
-; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT:    vpsadbw %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512VL-NEXT:    vpsadbw %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512VL-NEXT:    vpsadbw %xmm5, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512VL-NEXT:    retq
 ;
 ; AVX512VPOPCNT-LABEL: widen_ctpop_v2i32_v8i32:
@@ -1297,49 +1305,53 @@ define <8 x i32> @widen_cttz_v2i32_v8i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>
 ;
 ; AVX2-LABEL: widen_cttz_v2i32_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX2-NEXT:    vpaddd %xmm4, %xmm0, %xmm5
-; AVX2-NEXT:    vpandn %xmm5, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $4, %xmm0, %xmm5
-; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX2-NEXT:    vpand %xmm6, %xmm5, %xmm5
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX2-NEXT:    vpshufb %xmm5, %xmm7, %xmm5
-; AVX2-NEXT:    vpand %xmm6, %xmm0, %xmm0
-; AVX2-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
-; AVX2-NEXT:    vpaddd %xmm4, %xmm1, %xmm8
+; AVX2-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX2-NEXT:    vpaddd %xmm5, %xmm0, %xmm4
+; AVX2-NEXT:    vpandn %xmm4, %xmm0, %xmm6
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %xmm0, %xmm6, %xmm7
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %xmm7, %xmm4, %xmm7
+; AVX2-NEXT:    vpsrlw $4, %xmm6, %xmm6
+; AVX2-NEXT:    vpand %xmm0, %xmm6, %xmm6
+; AVX2-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX2-NEXT:    vpaddb %xmm7, %xmm6, %xmm6
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; AVX2-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX2-NEXT:    vpsadbw %xmm7, %xmm6, %xmm6
+; AVX2-NEXT:    vpaddd %xmm5, %xmm1, %xmm8
 ; AVX2-NEXT:    vpandn %xmm8, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrlw $4, %xmm1, %xmm8
-; AVX2-NEXT:    vpand %xmm6, %xmm8, %xmm8
-; AVX2-NEXT:    vpshufb %xmm8, %xmm7, %xmm8
-; AVX2-NEXT:    vpand %xmm6, %xmm1, %xmm1
-; AVX2-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
-; AVX2-NEXT:    vpaddd %xmm4, %xmm2, %xmm9
-; AVX2-NEXT:    vpandn %xmm9, %xmm2, %xmm2
-; AVX2-NEXT:    vpsrlw $4, %xmm2, %xmm9
-; AVX2-NEXT:    vpand %xmm6, %xmm9, %xmm9
-; AVX2-NEXT:    vpshufb %xmm9, %xmm7, %xmm9
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm9[0]
-; AVX2-NEXT:    vpand %xmm6, %xmm2, %xmm2
-; AVX2-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT:    vpaddb %xmm0, %xmm5, %xmm0
-; AVX2-NEXT:    vpaddd %xmm4, %xmm3, %xmm2
-; AVX2-NEXT:    vpandn %xmm2, %xmm3, %xmm2
-; AVX2-NEXT:    vpsrlw $4, %xmm2, %xmm3
-; AVX2-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; AVX2-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm8[0],xmm3[0]
-; AVX2-NEXT:    vpand %xmm6, %xmm2, %xmm2
-; AVX2-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpsadbw %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT:    vpsadbw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm8
+; AVX2-NEXT:    vpshufb %xmm8, %xmm4, %xmm8
+; AVX2-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm1
+; AVX2-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpaddb %xmm1, %xmm8, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpsadbw %xmm7, %xmm1, %xmm1
+; AVX2-NEXT:    vpackuswb %xmm1, %xmm6, %xmm1
+; AVX2-NEXT:    vpaddd %xmm5, %xmm2, %xmm6
+; AVX2-NEXT:    vpandn %xmm6, %xmm2, %xmm2
+; AVX2-NEXT:    vpand %xmm0, %xmm2, %xmm6
+; AVX2-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX2-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX2-NEXT:    vpand %xmm0, %xmm2, %xmm2
+; AVX2-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
+; AVX2-NEXT:    vpaddb %xmm6, %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpsadbw %xmm7, %xmm2, %xmm2
+; AVX2-NEXT:    vpaddd %xmm5, %xmm3, %xmm5
+; AVX2-NEXT:    vpandn %xmm5, %xmm3, %xmm3
+; AVX2-NEXT:    vpand %xmm0, %xmm3, %xmm5
+; AVX2-NEXT:    vpshufb %xmm5, %xmm4, %xmm5
+; AVX2-NEXT:    vpsrlw $4, %xmm3, %xmm3
+; AVX2-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX2-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX2-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX2-NEXT:    vpsadbw %xmm7, %xmm0, %xmm0
+; AVX2-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: widen_cttz_v2i32_v8i32:
@@ -1640,49 +1652,53 @@ define <8 x i32> @widen_cttz_undef_v2i32_v8i32(<2 x i32> %a0, <2 x i32> %a1, <2
 ;
 ; AVX2-LABEL: widen_cttz_undef_v2i32_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX2-NEXT:    vpaddd %xmm4, %xmm0, %xmm5
-; AVX2-NEXT:    vpandn %xmm5, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrlw $4, %xmm0, %xmm5
-; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX2-NEXT:    vpand %xmm6, %xmm5, %xmm5
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; AVX2-NEXT:    vpshufb %xmm5, %xmm7, %xmm5
-; AVX2-NEXT:    vpand %xmm6, %xmm0, %xmm0
-; AVX2-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
-; AVX2-NEXT:    vpaddd %xmm4, %xmm1, %xmm8
+; AVX2-NEXT:    vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX2-NEXT:    vpaddd %xmm5, %xmm0, %xmm4
+; AVX2-NEXT:    vpandn %xmm4, %xmm0, %xmm6
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %xmm0, %xmm6, %xmm7
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX2-NEXT:    vpshufb %xmm7, %xmm4, %xmm7
+; AVX2-NEXT:    vpsrlw $4, %xmm6, %xmm6
+; AVX2-NEXT:    vpand %xmm0, %xmm6, %xmm6
+; AVX2-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX2-NEXT:    vpaddb %xmm7, %xmm6, %xmm6
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; AVX2-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX2-NEXT:    vpsadbw %xmm7, %xmm6, %xmm6
+; AVX2-NEXT:    vpaddd %xmm5, %xmm1, %xmm8
 ; AVX2-NEXT:    vpandn %xmm8, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrlw $4, %xmm1, %xmm8
-; AVX2-NEXT:    vpand %xmm6, %xmm8, %xmm8
-; AVX2-NEXT:    vpshufb %xmm8, %xmm7, %xmm8
-; AVX2-NEXT:    vpand %xmm6, %xmm1, %xmm1
-; AVX2-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
-; AVX2-NEXT:    vpaddd %xmm4, %xmm2, %xmm9
-; AVX2-NEXT:    vpandn %xmm9, %xmm2, %xmm2
-; AVX2-NEXT:    vpsrlw $4, %xmm2, %xmm9
-; AVX2-NEXT:    vpand %xmm6, %xmm9, %xmm9
-; AVX2-NEXT:    vpshufb %xmm9, %xmm7, %xmm9
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm9[0]
-; AVX2-NEXT:    vpand %xmm6, %xmm2, %xmm2
-; AVX2-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT:    vpaddb %xmm0, %xmm5, %xmm0
-; AVX2-NEXT:    vpaddd %xmm4, %xmm3, %xmm2
-; AVX2-NEXT:    vpandn %xmm2, %xmm3, %xmm2
-; AVX2-NEXT:    vpsrlw $4, %xmm2, %xmm3
-; AVX2-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; AVX2-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm8[0],xmm3[0]
-; AVX2-NEXT:    vpand %xmm6, %xmm2, %xmm2
-; AVX2-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpsadbw %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT:    vpsadbw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm8
+; AVX2-NEXT:    vpshufb %xmm8, %xmm4, %xmm8
+; AVX2-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm1
+; AVX2-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX2-NEXT:    vpaddb %xmm1, %xmm8, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX2-NEXT:    vpsadbw %xmm7, %xmm1, %xmm1
+; AVX2-NEXT:    vpackuswb %xmm1, %xmm6, %xmm1
+; AVX2-NEXT:    vpaddd %xmm5, %xmm2, %xmm6
+; AVX2-NEXT:    vpandn %xmm6, %xmm2, %xmm2
+; AVX2-NEXT:    vpand %xmm0, %xmm2, %xmm6
+; AVX2-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX2-NEXT:    vpsrlw $4, %xmm2, %xmm2
+; AVX2-NEXT:    vpand %xmm0, %xmm2, %xmm2
+; AVX2-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
+; AVX2-NEXT:    vpaddb %xmm6, %xmm2, %xmm2
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT:    vpsadbw %xmm7, %xmm2, %xmm2
+; AVX2-NEXT:    vpaddd %xmm5, %xmm3, %xmm5
+; AVX2-NEXT:    vpandn %xmm5, %xmm3, %xmm3
+; AVX2-NEXT:    vpand %xmm0, %xmm3, %xmm5
+; AVX2-NEXT:    vpshufb %xmm5, %xmm4, %xmm5
+; AVX2-NEXT:    vpsrlw $4, %xmm3, %xmm3
+; AVX2-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX2-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX2-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX2-NEXT:    vpsadbw %xmm7, %xmm0, %xmm0
+; AVX2-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: widen_cttz_undef_v2i32_v8i32:


