[llvm] 82ef7e5 - [X86] combineVTRUNCSAT - attempt to recognise VTRUNCS/US(CONCAT(X,Y)) -> PACKSS/US(X,Y) folds. (#178707)

via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 8 06:10:06 PDT 2026


Author: Simon Pilgrim
Date: 2026-03-08T13:10:01Z
New Revision: 82ef7e54d3e644a26a834b240f590287cf3ee352

URL: https://github.com/llvm/llvm-project/commit/82ef7e54d3e644a26a834b240f590287cf3ee352
DIFF: https://github.com/llvm/llvm-project/commit/82ef7e54d3e644a26a834b240f590287cf3ee352.diff

LOG: [X86] combineVTRUNCSAT - attempt to recognise VTRUNCS/US(CONCAT(X,Y)) -> PACKSS/US(X,Y) folds. (#178707)

If we're just concatenating subvectors together to perform a saturated
truncate, see if we can perform PACK on the subvectors directly instead
- 256-bit PACK will require a post-shuffle, but this will typically fold
away in later shuffle combining and it's probably better than changing
vector widths with concats.

Reference patch based off poor codegen identified in #169995

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/masked_packss.ll
    llvm/test/CodeGen/X86/masked_packus.ll
    llvm/test/CodeGen/X86/packss.ll
    llvm/test/CodeGen/X86/packus.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1ebfd5defdc40..8959ca924d2af 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55898,6 +55898,41 @@ static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
+static SDValue combineVTRUNCSAT(SDNode *N, SelectionDAG &DAG,
+                                TargetLowering::DAGCombinerInfo &DCI) {
+  using namespace SDPatternMatch;
+  unsigned Opc = N->getOpcode();
+  EVT VT = N->getValueType(0);
+  unsigned EltSizeInBits = VT.getScalarSizeInBits();
+  assert((Opc == X86ISD::VTRUNCS || Opc == X86ISD::VTRUNCUS) &&
+         "Unexpected VTRUNC node");
+  assert((VT.is128BitVector() || VT.is256BitVector()) &&
+         "Unexpected VTRUNC node type");
+
+  // If the src was concatenated, see if PACKSS/US would be better.
+  SDValue Src;
+  if (EltSizeInBits <= 16 &&
+      (sd_match(N, m_UnaryOp(X86ISD::VTRUNCS, m_Value(Src))) ||
+       sd_match(N, m_UnaryOp(X86ISD::VTRUNCUS,
+                             m_SMaxLike(m_Value(Src), m_Zero())))) &&
+      (EltSizeInBits * 2) == Src.getScalarValueSizeInBits() &&
+      isFreeToSplitVector(Src, DAG)) {
+    SDLoc DL(N);
+    auto [LHS, RHS] = splitVector(Src, DAG, DL);
+    unsigned PackOpc = Opc == X86ISD::VTRUNCS ? X86ISD::PACKSS : X86ISD::PACKUS;
+    SDValue Pack = DAG.getNode(PackOpc, DL, VT, LHS, RHS);
+    if (VT.is128BitVector())
+      return Pack;
+    // Shuffle sub-lanes back to match the vtrunc sequential order.
+    return DAG.getBitcast(
+        VT, DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64,
+                        DAG.getBitcast(MVT::v4i64, Pack),
+                        getV4X86ShuffleImm8ForMask({0, 2, 1, 3}, DL, DAG)));
+  }
+
+  return SDValue();
+}
+
 /// Returns the negated value if the node \p N flips sign of FP value.
 ///
 /// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
@@ -62506,6 +62541,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::FNEG:           return combineFneg(N, DAG, DCI, Subtarget);
   case ISD::TRUNCATE:       return combineTruncate(N, DAG, Subtarget);
   case X86ISD::VTRUNC:      return combineVTRUNC(N, DAG, DCI);
+  case X86ISD::VTRUNCS:
+  case X86ISD::VTRUNCUS:    return combineVTRUNCSAT(N, DAG, DCI);
   case X86ISD::ANDNP:       return combineAndnp(N, DAG, DCI, Subtarget);
   case X86ISD::FAND:        return combineFAnd(N, DAG, Subtarget);
   case X86ISD::FANDN:       return combineFAndn(N, DAG, Subtarget);

diff --git a/llvm/test/CodeGen/X86/masked_packss.ll b/llvm/test/CodeGen/X86/masked_packss.ll
index 9daf74539776c..d84eaeaae60b7 100644
--- a/llvm/test/CodeGen/X86/masked_packss.ll
+++ b/llvm/test/CodeGen/X86/masked_packss.ll
@@ -16,11 +16,8 @@ define <16 x i8> @_mm_mask_packss_epi16_manual(<16 x i8> %src, i16 noundef %k, <
 ;
 ; AVX512-LABEL: _mm_mask_packss_epi16_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX512-NEXT:    kmovd %edi, %k1
-; AVX512-NEXT:    vpmovswb %ymm1, %xmm0 {%k1}
-; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    vpacksswb %xmm2, %xmm1, %xmm0 {%k1}
 ; AVX512-NEXT:    retq
   %sh = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %minv = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %sh, <16 x i16> splat (i16 -128))
@@ -46,11 +43,8 @@ define <32 x i8> @_mm256_mask_packss_epi16_manual(<32 x i8> %src, i32 noundef %k
 ;
 ; AVX512-LABEL: _mm256_mask_packss_epi16_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
 ; AVX512-NEXT:    kmovd %edi, %k1
-; AVX512-NEXT:    vpmovswb %zmm1, %ymm0 {%k1}
+; AVX512-NEXT:    vpacksswb %ymm2, %ymm1, %ymm0 {%k1}
 ; AVX512-NEXT:    retq
   %sh = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %minv = tail call <32 x i16> @llvm.smax.v32i16(<32 x i16> %sh, <32 x i16> splat (i16 -128))
@@ -114,11 +108,8 @@ define <8 x i16> @_mm_mask_packss_epi32_manual(<8 x i16> %src, i8 noundef %k, <4
 ;
 ; AVX512-LABEL: _mm_mask_packss_epi32_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX512-NEXT:    kmovd %edi, %k1
-; AVX512-NEXT:    vpmovsdw %ymm1, %xmm0 {%k1}
-; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    vpackssdw %xmm2, %xmm1, %xmm0 {%k1}
 ; AVX512-NEXT:    retq
   %sh = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %minv = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %sh, <8 x i32> splat (i32 -32768))
@@ -143,11 +134,8 @@ define <16 x i16> @_mm256_mask_packss_epi32_manual(<16 x i16> %src, i16 noundef
 ;
 ; AVX512-LABEL: _mm256_mask_packss_epi32_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
 ; AVX512-NEXT:    kmovd %edi, %k1
-; AVX512-NEXT:    vpmovsdw %zmm1, %ymm0 {%k1}
+; AVX512-NEXT:    vpackssdw %ymm2, %ymm1, %ymm0 {%k1}
 ; AVX512-NEXT:    retq
   %sh = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
   %minv = tail call <16 x i32> @llvm.smax.v16i32(<16 x i32> %sh, <16 x i32> splat (i32 -32768))

diff --git a/llvm/test/CodeGen/X86/masked_packus.ll b/llvm/test/CodeGen/X86/masked_packus.ll
index 65aa90173e3e6..52507542945c7 100644
--- a/llvm/test/CodeGen/X86/masked_packus.ll
+++ b/llvm/test/CodeGen/X86/masked_packus.ll
@@ -16,13 +16,8 @@ define <16 x i8> @_mm_mask_packus_epi16_manual(<16 x i8> %src, i16 noundef %k, <
 ;
 ; AVX512-LABEL: _mm_mask_packus_epi16_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vpmaxsw %ymm2, %ymm1, %ymm1
 ; AVX512-NEXT:    kmovd %edi, %k1
-; AVX512-NEXT:    vpmovuswb %ymm1, %xmm0 {%k1}
-; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    vpackuswb %xmm2, %xmm1, %xmm0 {%k1}
 ; AVX512-NEXT:    retq
   %sh = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %minv = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %sh, <16 x i16> splat (i16 0))
@@ -48,13 +43,8 @@ define <32 x i8> @_mm256_mask_packus_epi16_manual(<32 x i8> %src, i32 noundef %k
 ;
 ; AVX512-LABEL: _mm256_mask_packus_epi16_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vpmaxsw %zmm2, %zmm1, %zmm1
 ; AVX512-NEXT:    kmovd %edi, %k1
-; AVX512-NEXT:    vpmovuswb %zmm1, %ymm0 {%k1}
+; AVX512-NEXT:    vpackuswb %ymm2, %ymm1, %ymm0 {%k1}
 ; AVX512-NEXT:    retq
   %sh = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %minv = tail call <32 x i16> @llvm.smax.v32i16(<32 x i16> %sh, <32 x i16> splat (i16 0))
@@ -121,13 +111,8 @@ define <8 x i16> @_mm_mask_packus_epi32_manual(<8 x i16> %src, i8 noundef %k, <4
 ;
 ; AVX512-LABEL: _mm_mask_packus_epi32_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vpmaxsd %ymm2, %ymm1, %ymm1
 ; AVX512-NEXT:    kmovd %edi, %k1
-; AVX512-NEXT:    vpmovusdw %ymm1, %xmm0 {%k1}
-; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    vpackusdw %xmm2, %xmm1, %xmm0 {%k1}
 ; AVX512-NEXT:    retq
   %sh = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %minv = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %sh, <8 x i32> splat (i32 0))
@@ -152,13 +137,8 @@ define <16 x i16> @_mm256_mask_packus_epi32_manual(<16 x i16> %src, i16 noundef
 ;
 ; AVX512-LABEL: _mm256_mask_packus_epi32_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vpmaxsd %zmm2, %zmm1, %zmm1
 ; AVX512-NEXT:    kmovd %edi, %k1
-; AVX512-NEXT:    vpmovusdw %zmm1, %ymm0 {%k1}
+; AVX512-NEXT:    vpackusdw %ymm2, %ymm1, %ymm0 {%k1}
 ; AVX512-NEXT:    retq
   %sh = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
   %minv = tail call <16 x i32> @llvm.smax.v16i32(<16 x i32> %sh, <16 x i32> splat (i32 0))

diff --git a/llvm/test/CodeGen/X86/packss.ll b/llvm/test/CodeGen/X86/packss.ll
index 1c61b1eb84cd5..da739dc277f68 100644
--- a/llvm/test/CodeGen/X86/packss.ll
+++ b/llvm/test/CodeGen/X86/packss.ll
@@ -431,23 +431,10 @@ define <16 x i8> @_mm_packss_epi16_manual(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
 ; SSE-NEXT:    ret{{[l|q]}}
 ;
-; AVX1-LABEL: _mm_packss_epi16_manual:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    ret{{[l|q]}}
-;
-; AVX2-LABEL: _mm_packss_epi16_manual:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    ret{{[l|q]}}
-;
-; AVX512-LABEL: _mm_packss_epi16_manual:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovswb %ymm0, %xmm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: _mm_packss_epi16_manual:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %sh   = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %minv = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %sh, <16 x i16> splat (i16 -128))
   %sat  = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %minv, <16 x i16> splat (i16 127))
@@ -484,10 +471,7 @@ define <32 x i8> @_mm256_packss_epi16_manual(<16 x i16> %a, <16 x i16> %b) nounw
 ;
 ; AVX512-LABEL: _mm256_packss_epi16_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT:    vpmovswb %zmm0, %ymm0
+; AVX512-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    ret{{[l|q]}}
 ;
 ; X64-SSE-LABEL: _mm256_packss_epi16_manual:
@@ -600,23 +584,10 @@ define <8 x i16> @_mm_packss_epi32_manual(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE-NEXT:    packssdw %xmm1, %xmm0
 ; SSE-NEXT:    ret{{[l|q]}}
 ;
-; AVX1-LABEL: _mm_packss_epi32_manual:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    ret{{[l|q]}}
-;
-; AVX2-LABEL: _mm_packss_epi32_manual:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    ret{{[l|q]}}
-;
-; AVX512-LABEL: _mm_packss_epi32_manual:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovsdw %ymm0, %xmm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: _mm_packss_epi32_manual:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %sh   = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %minv = tail call <8 x i32> @llvm.smax.v8i32(<8 x i32> %sh, <8 x i32> splat (i32 -32768))
   %sat  = tail call <8 x i32> @llvm.smin.v8i32(<8 x i32> %minv, <8 x i32> splat (i32 32767))
@@ -653,10 +624,7 @@ define <16 x i16> @_mm256_packss_epi32_manual(<8 x i32> %a, <8 x i32> %b) nounwi
 ;
 ; AVX512-LABEL: _mm256_packss_epi32_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT:    vpmovsdw %zmm0, %ymm0
+; AVX512-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    ret{{[l|q]}}
 ;
 ; X64-SSE-LABEL: _mm256_packss_epi32_manual:

diff --git a/llvm/test/CodeGen/X86/packus.ll b/llvm/test/CodeGen/X86/packus.ll
index c0941ef19d5c0..38fd914f1f947 100644
--- a/llvm/test/CodeGen/X86/packus.ll
+++ b/llvm/test/CodeGen/X86/packus.ll
@@ -517,25 +517,10 @@ define <16 x i8> @_mm_packus_epi16_manual(<8 x i16> %a, <8 x i16> %b) nounwind {
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
 ; SSE-NEXT:    ret{{[l|q]}}
 ;
-; AVX1-LABEL: _mm_packus_epi16_manual:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    ret{{[l|q]}}
-;
-; AVX2-LABEL: _mm_packus_epi16_manual:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    ret{{[l|q]}}
-;
-; AVX512-LABEL: _mm_packus_epi16_manual:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovuswb %ymm0, %xmm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: _mm_packus_epi16_manual:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %sh  = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %minv = tail call <16 x i16> @llvm.smax.v16i16(<16 x i16> %sh, <16 x i16> splat (i16 0))
   %sat = tail call <16 x i16> @llvm.smin.v16i16(<16 x i16> %minv, <16 x i16> splat (i16 255))
@@ -578,12 +563,7 @@ define <32 x i8> @_mm256_packus_epi16_manual(<16 x i16> %a, <16 x i16> %b) nounw
 ;
 ; AVX512-LABEL: _mm256_packus_epi16_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpmovuswb %zmm0, %ymm0
+; AVX512-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    ret{{[l|q]}}
   %sh  = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %minv = tail call <32 x i16> @llvm.smax.v32i16(<32 x i16> %sh, <32 x i16> splat (i16 0))
@@ -747,25 +727,10 @@ define <8 x i16> @_mm_packus_epi32_manual(<4 x i32> %a, <4 x i32> %b) nounwind {
 ; SSE4-NEXT:    packusdw %xmm1, %xmm0
 ; SSE4-NEXT:    ret{{[l|q]}}
 ;
-; AVX1-LABEL: _mm_packus_epi32_manual:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    ret{{[l|q]}}
-;
-; AVX2-LABEL: _mm_packus_epi32_manual:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    ret{{[l|q]}}
-;
-; AVX512-LABEL: _mm_packus_epi32_manual:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovusdw %ymm0, %xmm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    ret{{[l|q]}}
+; AVX-LABEL: _mm_packus_epi32_manual:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
   %sh  = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %minv = tail call <8 x i32> @llvm.smax.v32i32(<8 x i32> %sh, <8 x i32> splat (i32 0))
   %sat = tail call <8 x i32> @llvm.smin.v32i32(<8 x i32> %minv, <8 x i32> splat (i32 65535))
@@ -911,12 +876,7 @@ define <16 x i16> @_mm256_packus_epi32_manual(<8 x i32> %a, <8 x i32> %b) nounwi
 ;
 ; AVX512-LABEL: _mm256_packus_epi32_manual:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpmovusdw %zmm0, %ymm0
+; AVX512-NEXT:    vpackusdw %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    ret{{[l|q]}}
   %sh  = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
   %minv = tail call <16 x i32> @llvm.smax.v32i32(<16 x i32> %sh, <16 x i32> splat (i32 0))


        


More information about the llvm-commits mailing list