[llvm] [X86] Fold EXPAND(X,Y,M) -> SELECT(M,X,Y) when M is a lowest bit mask (PR #179630)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 4 03:15:40 PST 2026


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/179630

From c5cc6272cac7184bb1c71ee730b40ab2ed04a4f2 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 4 Feb 2026 09:26:26 +0000
Subject: [PATCH] [X86] Fold EXPAND(X,Y,M) -> SELECT(M,X,Y) when M is a lowest
 bit mask

If an EXPAND node's mask is just the lowest bits, then we can replace it with a more general SELECT node, which can be cheaper and potentially allow predication.

Fixes #179008
---
 llvm/lib/Target/X86/X86ISelLowering.cpp                  | 9 +++++++++
 llvm/test/CodeGen/X86/avx512bwvl-arith.ll                | 2 +-
 llvm/test/CodeGen/X86/legalize-vec-assertzext.ll         | 4 ++--
 .../test/CodeGen/X86/vector-shuffle-combining-avx512f.ll | 8 ++++----
 4 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b2fac92676eaa..368cdeb6d0fce 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -43476,6 +43476,15 @@ static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL,
 
     return SDValue();
   }
+  case X86ISD::EXPAND: {
+    SDValue ExpVec = N.getOperand(0);
+    SDValue PassThru = N.getOperand(1);
+    SDValue ExpMask = N.getOperand(2);
+    if (auto *Msk = dyn_cast<ConstantSDNode>(peekThroughBitcasts(ExpMask)))
+      if (Msk->getAPIntValue().isMask())
+        return DAG.getSelect(DL, VT, ExpMask, ExpVec, PassThru);
+    return SDValue();
+  }
   case X86ISD::VPERMV: {
     // Combine VPERMV to VPERMV3 if the source operand can be freely split.
     SmallVector<int, 32> Mask;
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-arith.ll b/llvm/test/CodeGen/X86/avx512bwvl-arith.ll
index 97ca0d88b7d4d..28ff1b3e2da0d 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-arith.ll
@@ -243,7 +243,7 @@ define i16 @PR90356(<16 x i1> %a) {
 ; CHECK-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
 ; CHECK-NEXT:    movb $63, %al
 ; CHECK-NEXT:    kmovd %eax, %k1
-; CHECK-NEXT:    vpexpandq %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    vptestmd %zmm0, %zmm0, %k0
 ; CHECK-NEXT:    kmovd %k0, %eax
 ; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
diff --git a/llvm/test/CodeGen/X86/legalize-vec-assertzext.ll b/llvm/test/CodeGen/X86/legalize-vec-assertzext.ll
index 2cf37c68b8b40..1799dd3832aad 100644
--- a/llvm/test/CodeGen/X86/legalize-vec-assertzext.ll
+++ b/llvm/test/CodeGen/X86/legalize-vec-assertzext.ll
@@ -23,7 +23,7 @@ define i64 @widen_assertzext(ptr %x) nounwind {
; CHECK-NEXT:    callq test2@PLT
 ; CHECK-NEXT:    movb $127, %al
 ; CHECK-NEXT:    kmovw %eax, %k1
-; CHECK-NEXT:    vpexpandq %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
 ; CHECK-NEXT:    vmovq %xmm0, %rax
 ; CHECK-NEXT:    popq %rcx
@@ -41,7 +41,7 @@ define i64 @widen_assertzext_range_attr(ptr %x) nounwind {
; CHECK-NEXT:    callq test2@PLT
 ; CHECK-NEXT:    movb $127, %al
 ; CHECK-NEXT:    kmovw %eax, %k1
-; CHECK-NEXT:    vpexpandq %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
 ; CHECK-NEXT:    vmovq %xmm0, %rax
 ; CHECK-NEXT:    popq %rcx
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
index ed1d9632d272d..3edc6a33f025f 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
@@ -1062,7 +1062,7 @@ define <8 x i64> @PR179008(ptr %p0) {
 ; X86-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512F-NEXT:    movb $31, %cl
 ; X86-AVX512F-NEXT:    kmovw %ecx, %k1
-; X86-AVX512F-NEXT:    vpexpandq (%eax), %zmm0 {%k1} {z}
+; X86-AVX512F-NEXT:    vmovdqu64 (%eax), %zmm0 {%k1} {z}
 ; X86-AVX512F-NEXT:    retl
 ;
 ; X86-AVX512BW-LABEL: PR179008:
@@ -1070,21 +1070,21 @@ define <8 x i64> @PR179008(ptr %p0) {
 ; X86-AVX512BW-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512BW-NEXT:    movb $31, %cl
 ; X86-AVX512BW-NEXT:    kmovd %ecx, %k1
-; X86-AVX512BW-NEXT:    vpexpandq (%eax), %zmm0 {%k1} {z}
+; X86-AVX512BW-NEXT:    vmovdqu64 (%eax), %zmm0 {%k1} {z}
 ; X86-AVX512BW-NEXT:    retl
 ;
 ; X64-AVX512F-LABEL: PR179008:
 ; X64-AVX512F:       # %bb.0:
 ; X64-AVX512F-NEXT:    movb $31, %al
 ; X64-AVX512F-NEXT:    kmovw %eax, %k1
-; X64-AVX512F-NEXT:    vpexpandq (%rdi), %zmm0 {%k1} {z}
+; X64-AVX512F-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} {z}
 ; X64-AVX512F-NEXT:    retq
 ;
 ; X64-AVX512BW-LABEL: PR179008:
 ; X64-AVX512BW:       # %bb.0:
 ; X64-AVX512BW-NEXT:    movb $31, %al
 ; X64-AVX512BW-NEXT:    kmovd %eax, %k1
-; X64-AVX512BW-NEXT:    vpexpandq (%rdi), %zmm0 {%k1} {z}
+; X64-AVX512BW-NEXT:    vmovdqu64 (%rdi), %zmm0 {%k1} {z}
 ; X64-AVX512BW-NEXT:    retq
   %load = load <8 x i64>, ptr %p0, align 1
   %shuf = shufflevector <8 x i64> %load, <8 x i64> <i64 poison, i64 poison, i64 poison, i64 poison, i64 poison, i64 0, i64 0, i64 0>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 13, i32 14, i32 15>



More information about the llvm-commits mailing list