[llvm] f2192b0 - [X86] combineConcatVectorOps - fix typo where we were comparing the same subvector types (#173075)

via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 19 11:01:55 PST 2025


Author: Simon Pilgrim
Date: 2025-12-19T19:01:50Z
New Revision: f2192b0b64cae114ea32a5762f7ed51873f62e00

URL: https://github.com/llvm/llvm-project/commit/f2192b0b64cae114ea32a5762f7ed51873f62e00
DIFF: https://github.com/llvm/llvm-project/commit/f2192b0b64cae114ea32a5762f7ed51873f62e00.diff

LOG: [X86] combineConcatVectorOps - fix typo where we were comparing the same subvector types (#173075)

Make it clearer that the subvector sources of a concat(extend_vector_inreg(x),extend_vector_inreg(y)) pair must have the same type.

Fixes #173030

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 68d5e6ce199bc..796c227fcb02c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -59495,10 +59495,10 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
           ((VT.is256BitVector() && Subtarget.hasInt256()) ||
            (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
             (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
-          Op0.getOperand(0).getValueType().is128BitVector() &&
-          Op0.getOperand(0).getValueType() ==
-              Ops[0].getOperand(0).getValueType()) {
-        EVT SrcVT = Op0.getOperand(0).getValueType();
+          Ops[0].getOperand(0).getValueType().is128BitVector() &&
+          Ops[0].getOperand(0).getValueType() ==
+              Ops[1].getOperand(0).getValueType()) {
+        EVT SrcVT = Ops[0].getOperand(0).getValueType();
         unsigned NumElts = VT.getVectorNumElements();
         MVT UnpackSVT =
             MVT::getIntegerVT(SrcVT.getScalarSizeInBits() * (NumElts / 2));

diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index 85a42f73bb959..6f5a6e0fc031d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -1258,18 +1258,19 @@ define <8 x float> @PR173030(i8 %a0, i16 %a1, i32 %a2) {
 ; X86-NEXT:    # kill: def $cl killed $cl killed $ecx def $ecx
 ; X86-NEXT:    incb %cl
 ; X86-NEXT:    vpinsrb $1, %ecx, %xmm0, %xmm0
+; X86-NEXT:    vpmovsxbd %xmm0, %xmm0
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    vmovd %ecx, %xmm1
 ; X86-NEXT:    incl %ecx
 ; X86-NEXT:    vpinsrw $1, %ecx, %xmm1, %xmm1
-; X86-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-NEXT:    vmovd %eax, %xmm1
+; X86-NEXT:    vpmovsxwd %xmm1, %xmm1
+; X86-NEXT:    vmovd %eax, %xmm2
 ; X86-NEXT:    incl %eax
-; X86-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X86-NEXT:    vcvtdq2ps %xmm1, %xmm1
-; X86-NEXT:    vpmovsxbd %xmm0, %ymm0
+; X86-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
+; X86-NEXT:    vcvtdq2ps %xmm2, %xmm2
+; X86-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; X86-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; X86-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm1
 ; X86-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X86-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; X86-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
@@ -1282,17 +1283,18 @@ define <8 x float> @PR173030(i8 %a0, i16 %a1, i32 %a2) {
 ; X64-NEXT:    leal 1(%rdi), %eax
 ; X64-NEXT:    vmovd %edi, %xmm0
 ; X64-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
+; X64-NEXT:    vpmovsxbd %xmm0, %xmm0
 ; X64-NEXT:    vmovd %esi, %xmm1
 ; X64-NEXT:    incl %esi
 ; X64-NEXT:    vpinsrw $1, %esi, %xmm1, %xmm1
-; X64-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64-NEXT:    vmovd %edx, %xmm1
+; X64-NEXT:    vpmovsxwd %xmm1, %xmm1
+; X64-NEXT:    vmovd %edx, %xmm2
 ; X64-NEXT:    incl %edx
-; X64-NEXT:    vpinsrd $1, %edx, %xmm1, %xmm1
-; X64-NEXT:    vcvtdq2ps %xmm1, %xmm1
-; X64-NEXT:    vpmovsxbd %xmm0, %ymm0
+; X64-NEXT:    vpinsrd $1, %edx, %xmm2, %xmm2
+; X64-NEXT:    vcvtdq2ps %xmm2, %xmm2
+; X64-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; X64-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm1
 ; X64-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; X64-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
 ; X64-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]


        


More information about the llvm-commits mailing list