[llvm] e74d834 - [X86] combineConcatVectorOps - concat mixed v2f64/v4f64 faux shuffles into v4f64/v8f64 vshufpd (#143521)

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jun 10 07:48:40 PDT 2025


Author: Simon Pilgrim
Date: 2025-06-10T15:48:37+01:00
New Revision: e74d834cb155a894fa0f9dbd1483b7fef53a79ae

URL: https://github.com/llvm/llvm-project/commit/e74d834cb155a894fa0f9dbd1483b7fef53a79ae
DIFF: https://github.com/llvm/llvm-project/commit/e74d834cb155a894fa0f9dbd1483b7fef53a79ae.diff

LOG: [X86] combineConcatVectorOps - concat mixed v2f64/v4f64 faux shuffles into v4f64/v8f64 vshufpd (#143521)

Replace the getTargetShuffleMask call (only permitted for target shuffles)
with getTargetShuffleInputs, which also matches various faux shuffles
(insert+extract sequences in particular).

This does mean we now have to bail out explicitly on undef/zero mask
elements, which getTargetShuffleMask previously handled for us.
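For reference, the test updated below gives a concrete faux shuffle that
getTargetShuffleMask could not look through. Its IR body is not shown in
the diff, but from the function signature and the new blend pattern it is
plausibly the following (a reconstruction, not quoted from the test):

; Insert the scalar into the first f64 element of each 128-bit lane.
; Neither insertelement lowers to a target shuffle node, so the old
; getTargetShuffleMask call rejected the pattern; getTargetShuffleInputs
; can peek through the insert+extract sequence.
define <4 x double> @insert_f64_firstelts(<4 x double> %x, double %s) {
  %v0 = insertelement <4 x double> %x, double %s, i32 0
  %v1 = insertelement <4 x double> %v0, double %s, i32 2
  ret <4 x double> %v1
}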

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/avx-insertelt.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 82427e826ee31..0fc47436dbf16 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -59304,23 +59304,17 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
   // We can always convert per-lane vXf64 shuffles into VSHUFPD.
   if (!IsSplat &&
       (VT == MVT::v4f64 || (VT == MVT::v8f64 && Subtarget.useAVX512Regs())) &&
-      all_of(Ops, [](SDValue Op) {
-        return Op.hasOneUse() && (Op.getOpcode() == X86ISD::MOVDDUP ||
-                                  Op.getOpcode() == X86ISD::SHUFP ||
-                                  Op.getOpcode() == X86ISD::VPERMILPI ||
-                                  Op.getOpcode() == X86ISD::BLENDI ||
-                                  Op.getOpcode() == X86ISD::UNPCKL ||
-                                  Op.getOpcode() == X86ISD::UNPCKH);
-      })) {
+      all_of(Ops, [](SDValue Op) { return Op.hasOneUse(); })) {
     // Collect the individual per-lane v2f64/v4f64 shuffles.
     MVT OpVT = Ops[0].getSimpleValueType();
     unsigned NumOpElts = OpVT.getVectorNumElements();
     SmallVector<SmallVector<SDValue, 2>, 4> SrcOps(NumOps);
     SmallVector<SmallVector<int, 8>, 4> SrcMasks(NumOps);
     if (all_of(seq<int>(NumOps), [&](int I) {
-          return getTargetShuffleMask(Ops[I], /*AllowSentinelZero=*/false,
-                                      SrcOps[I], SrcMasks[I]) &&
+          return getTargetShuffleInputs(Ops[I], SrcOps[I], SrcMasks[I], DAG,
+                                        Depth + 1) &&
                  !is128BitLaneCrossingShuffleMask(OpVT, SrcMasks[I]) &&
+                 none_of(SrcMasks[I], isUndefOrZero) &&
                  SrcMasks[I].size() == NumOpElts &&
                  all_of(SrcOps[I], [&OpVT](SDValue V) {
                    return V.getValueType() == OpVT;

diff --git a/llvm/test/CodeGen/X86/avx-insertelt.ll b/llvm/test/CodeGen/X86/avx-insertelt.ll
index 18ca01290c914..95a3169a5b161 100644
--- a/llvm/test/CodeGen/X86/avx-insertelt.ll
+++ b/llvm/test/CodeGen/X86/avx-insertelt.ll
@@ -221,10 +221,9 @@ define <8 x float> @insert_f32_firstelts(<8 x float> %x, float %s) {
 define <4 x double> @insert_f64_firstelts(<4 x double> %x, double %s) {
 ; AVX-LABEL: insert_f64_firstelts:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vblendps {{.*#+}} xmm2 = xmm1[0,1],xmm0[2,3]
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_f64_firstelts:


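For a sense of the pattern the surrounding combine targets ("We can always
convert per-lane vXf64 shuffles into VSHUFPD"), here is a hypothetical
example, not taken from the commit's tests: two in-lane v2f64 shuffles
whose results are concatenated, which can be emitted as a single 256-bit
vshufpd once the sources are concatenated as well.

; Hypothetical IR sketch. Each half is an in-lane <2 x double> shuffle
; (dst = { a[1], b[0] }, i.e. shufpd imm=0b01); the final shufflevector is
; a plain concatenation, so the whole thing maps to one vshufpd ymm with
; sources concat(%a,%c) and concat(%b,%d).
define <4 x double> @concat_v2f64_shuffles(<2 x double> %a, <2 x double> %b,
                                           <2 x double> %c, <2 x double> %d) {
  %lo = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 2>
  %hi = shufflevector <2 x double> %c, <2 x double> %d, <2 x i32> <i32 1, i32 2>
  %r = shufflevector <2 x double> %lo, <2 x double> %hi,
                     <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x double> %r
}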