[llvm] [X86] combineConcatVectorOps - convert X86ISD::BLENDI concatenation to use combineConcatVectorOps recursion (PR #131121)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 13 03:48:26 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-x86
Author: Simon Pilgrim (RKSimon)
Changes:
Only concatenate X86ISD::BLENDI nodes if at least one operand is beneficial to concatenate.
Add AVX1/AVX2 handling to 256-bit BLENDI nodes (accounting for AVX2 v16i16 repeated mask requirements).
Extend the existing AVX512BW handling (which still always concatenates until I get rid of the remaining regressions) to cover AVX512F for 32/64-bit scalar types.
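
As a point of reference (not part of the patch), the concatenated blend immediate is just the low half's immediate with the high half's immediate shifted up by the per-half element count, truncated back to the 8-bit field. A standalone sketch, with plain integers standing in for llvm::APInt and a made-up helper name:

```cpp
// Standalone sketch of the blend-immediate merge on the AVX1/AVX2 path
// (plain integers stand in for llvm::APInt; illustration only, not the patch).
#include <cassert>

// Merge the 8-bit blend immediates of two 128-bit BLENDI nodes into the
// immediate of the concatenated 256-bit BLENDI. NumEltsPerHalf is the
// element count of each 128-bit operand (e.g. 4 for v4i32, 2 for v2i64).
unsigned mergeBlendImm(unsigned LoImm, unsigned HiImm, unsigned NumEltsPerHalf) {
  // Mirrors Mask.zext(NumElts); Mask.insertBits(HiMask, NumElts / 2);
  // Mask = Mask.zextOrTrunc(8); in the patch.
  return (LoImm | (HiImm << NumEltsPerHalf)) & 0xFF;
}

int main() {
  // Two v4i32 blends taking lanes {0,2} and {1,3} from the second source
  // concatenate into a v8i32 blend with immediate 0b10100101.
  assert(mergeBlendImm(0b0101, 0b1010, /*NumEltsPerHalf=*/4) == 0b10100101);
  // For v16i16 the 8-bit immediate covers 16 lanes, so the mask repeats per
  // 128-bit half; that is why the AVX2 path requires both operands to carry
  // the same immediate before concatenating, and why the truncation is safe.
  assert(mergeBlendImm(0b00110011, 0b00110011, /*NumEltsPerHalf=*/8) == 0b00110011);
  return 0;
}
```
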
---
Patch is 221.13 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/131121.diff
10 Files Affected:
- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+36-10)
- (modified) llvm/test/CodeGen/X86/masked_store.ll (+24-21)
- (modified) llvm/test/CodeGen/X86/vector-fshl-rot-256.ll (+19-27)
- (modified) llvm/test/CodeGen/X86/vector-fshr-rot-256.ll (+19-27)
- (modified) llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll (+1251-1236)
- (modified) llvm/test/CodeGen/X86/vector-rotate-256.ll (+19-27)
- (modified) llvm/test/CodeGen/X86/vector-shift-ashr-256.ll (+12-16)
- (modified) llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll (+4-6)
- (modified) llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll (+5-7)
- (modified) llvm/test/CodeGen/X86/vector-shuffle-combining.ll (+10-13)
``````````diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 810a6c193db68..d4367491ab541 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -58512,16 +58512,42 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
}
break;
case X86ISD::BLENDI:
- if (NumOps == 2 && VT.is512BitVector() && Subtarget.useBWIRegs()) {
- unsigned NumElts = VT.getVectorNumElements();
- APInt Mask = getBLENDIBlendMask(Ops[0]).zext(NumElts);
- Mask.insertBits(getBLENDIBlendMask(Ops[1]), NumElts / 2);
- MVT MaskSVT = MVT::getIntegerVT(NumElts);
- MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
- SDValue Sel =
- DAG.getBitcast(MaskVT, DAG.getConstant(Mask, DL, MaskSVT));
- return DAG.getSelect(DL, VT, Sel, ConcatSubOperand(VT, Ops, 1),
- ConcatSubOperand(VT, Ops, 0));
+ if (VT.is256BitVector() && NumOps == 2 &&
+ (EltSizeInBits >= 32 ||
+ (Subtarget.hasInt256() &&
+ Ops[0].getOperand(2) == Ops[1].getOperand(2)))) {
+ SDValue Concat0 = CombineSubOperand(VT, Ops, 0);
+ SDValue Concat1 = CombineSubOperand(VT, Ops, 1);
+ if (Concat0 || Concat1) {
+ unsigned NumElts = VT.getVectorNumElements();
+ APInt Mask = getBLENDIBlendMask(Ops[0]).zext(NumElts);
+ Mask.insertBits(getBLENDIBlendMask(Ops[1]), NumElts / 2);
+ Mask = Mask.zextOrTrunc(8);
+ return DAG.getNode(Op0.getOpcode(), DL, VT,
+ Concat0 ? Concat0 : ConcatSubOperand(VT, Ops, 0),
+ Concat1 ? Concat1 : ConcatSubOperand(VT, Ops, 1),
+ DAG.getTargetConstant(Mask, DL, MVT::i8));
+ }
+ }
+ // TODO: BWI targets should only use CombineSubOperand.
+ if (((VT.is256BitVector() && Subtarget.hasVLX()) ||
+ (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
+ (EltSizeInBits >= 32 || Subtarget.useBWIRegs())) {
+ SDValue Concat0 = CombineSubOperand(VT, Ops, 0);
+ SDValue Concat1 = CombineSubOperand(VT, Ops, 1);
+ if (Concat0 || Concat1 || Subtarget.useBWIRegs()) {
+ unsigned NumElts = VT.getVectorNumElements();
+ APInt Mask = getBLENDIBlendMask(Ops[0]).zext(NumElts);
+ for (unsigned I = 1; I != NumOps; ++I)
+ Mask.insertBits(getBLENDIBlendMask(Ops[I]), I * (NumElts / NumOps));
+ MVT MaskSVT = MVT::getIntegerVT(NumElts);
+ MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
+ SDValue Sel =
+ DAG.getBitcast(MaskVT, DAG.getConstant(Mask, DL, MaskSVT));
+ Concat0 = Concat0 ? Concat0 : ConcatSubOperand(VT, Ops, 0);
+ Concat1 = Concat1 ? Concat1 : ConcatSubOperand(VT, Ops, 1);
+ return DAG.getSelect(DL, VT, Sel, Concat1, Concat0);
+ }
}
break;
case ISD::VSELECT:
diff --git a/llvm/test/CodeGen/X86/masked_store.ll b/llvm/test/CodeGen/X86/masked_store.ll
index c7ec5e87dcc6b..c7320275091c6 100644
--- a/llvm/test/CodeGen/X86/masked_store.ll
+++ b/llvm/test/CodeGen/X86/masked_store.ll
@@ -5882,9 +5882,9 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
;
; AVX1-LABEL: store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts:
; AVX1: ## %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %ymm0
-; AVX1-NEXT: vmovaps 32(%rsi), %ymm1
-; AVX1-NEXT: vmovaps 64(%rsi), %ymm2
+; AVX1-NEXT: vmovaps (%rsi), %ymm1
+; AVX1-NEXT: vmovaps 32(%rsi), %ymm2
+; AVX1-NEXT: vmovdqa 64(%rsi), %ymm0
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtd 48(%rdi), %xmm3, %xmm4
; AVX1-NEXT: vpcmpgtd 32(%rdi), %xmm3, %xmm5
@@ -5894,25 +5894,28 @@ define void @store_v24i32_v24i32_stride6_vf4_only_even_numbered_elts(ptr %trigge
; AVX1-NEXT: vpcmpgtd 64(%rdi), %xmm3, %xmm6
; AVX1-NEXT: vpcmpgtd 16(%rdi), %xmm3, %xmm7
; AVX1-NEXT: vpcmpgtd (%rdi), %xmm3, %xmm8
-; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm3[2,3],xmm8[4,5],xmm3[6,7]
-; AVX1-NEXT: vpslld $31, %xmm8, %xmm8
-; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm3[2,3],xmm7[4,5],xmm3[6,7]
-; AVX1-NEXT: vpslld $31, %xmm7, %xmm7
-; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm7
-; AVX1-NEXT: vmaskmovps %ymm0, %ymm7, (%rdx)
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
-; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7]
-; AVX1-NEXT: vpslld $31, %xmm5, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX1-NEXT: vmaskmovps %ymm2, %ymm0, 64(%rdx)
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX1-NEXT: vpslld $31, %xmm4, %xmm4
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpslld $31, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vmaskmovps %ymm2, %ymm3, 32(%rdx)
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm2
+; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
+; AVX1-NEXT: vpslld $31, %xmm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vpslld $31, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT: vmaskmovps %ymm1, %ymm0, 32(%rdx)
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
+; AVX1-NEXT: vmaskmovps %ymm1, %ymm2, (%rdx)
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vmaskmovps %ymm0, %ymm1, 64(%rdx)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
index f0f3d6028c27a..9e872cc6d74a9 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -139,12 +139,6 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vpmuludq %xmm2, %xmm6, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7]
-; AVX1-NEXT: vpor %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1
@@ -152,13 +146,15 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
+; AVX1-NEXT: vpmuludq %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovshdup {{.*#+}} ymm1 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
+; AVX1-NEXT: vmovsldup {{.*#+}} ymm2 = ymm3[0,0,2,2,4,4,6,6]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_funnnel_v8i32:
@@ -1085,24 +1081,20 @@ define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x) nounwind {
define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT: vpor %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX1-NEXT: vmovsldup {{.*#+}} ymm1 = ymm1[0,0,2,2,4,4,6,6]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_funnnel_v8i32:
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
index c66a3f709c5ca..5cac190eae690 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -146,12 +146,6 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm6, %xmm8, %xmm6
-; AVX1-NEXT: vpmuludq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2,3],xmm2[4,5],xmm6[6,7]
-; AVX1-NEXT: vpor %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
@@ -160,13 +154,15 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3
+; AVX1-NEXT: vpmuludq %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovshdup {{.*#+}} ymm1 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
+; AVX1-NEXT: vmovsldup {{.*#+}} ymm2 = ymm3[0,0,2,2,4,4,6,6]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_funnnel_v8i32:
@@ -1136,24 +1132,20 @@ define <4 x i64> @constant_funnnel_v4i64(<4 x i64> %x) nounwind {
define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT: vpor %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovshdup {{.*#+}} ymm2 = ymm0[1,1,3,3,5,5,7,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX1-NEXT: vmovsldup {{.*#+}} ymm1 = ymm1[0,0,2,2,4,4,6,6]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_funnnel_v8i32:
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
index 5980ac1e4e2a3..e692f8ac19d2e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
@@ -3913,222 +3913,228 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
;
; AVX512-LABEL: store_i16_stride6_vf32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa 32(%rcx), %ymm4
-; AVX512-NEXT: vpsrldq {{.*#+}} ymm0 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vmovdqa 32(%rdx), %ymm8
-; AVX512-NEXT: vpsrldq {{.*#+}} ymm1 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: subq $40, %rsp
+; AVX512-NEXT: vmovdqa 32(%rcx), %ymm5
+; AVX512-NEXT: vpsrldq {{.*#+}} ymm0 = ymm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm5[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vmovdqa 32(%rdx), %ymm2
+; AVX512-NEXT: vpsrldq {{.*#+}} ymm1 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
-; AVX512-NEXT: vpunpckhwd {{.*#+}} ymm1 = ymm8[4],ymm4[4],ymm8[5],ymm4[5],ymm8[6],ymm4[6],ymm8[7],ymm4[7],ymm8[12],ymm4[12],ymm8[13],ymm4[13],ymm8[14],ymm4[14],ymm8[15],ymm4[15]
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,2,3,3,5,6,7,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT: vmovdqa 32(%rsi), %ymm9
-; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm9[2,1,2,3,6,5,6,7]
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm2[4],ymm5[4],ymm2[5],ymm5[5],ymm2[6],ymm5[6],ymm2[7],ymm5[7],ymm2[12],ymm5[12],ymm2[13],ymm5[13],ymm2[14],ymm5[14],ymm2[15],ymm5[15]
+; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,2,3,3,5,6,7,7]
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa 32(%rsi), %ymm15
+; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm15[2,1,2,3,6,5,6,7]
+; AVX512-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512-NEXT: vmovdqa 32(%rdi), %ymm7
+; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm7[2,1,2,3,6,5,6,7]
; AVX512-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm11
-; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm11[2,1,2,3,6,5,6,7]
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm2 = ymm2[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm11[4],ymm9[4],ymm11[5],ymm9[5],ymm11[6],ymm9[6],ymm11[7],ymm9[7],ymm11[12],ymm9[12],ymm11[13],ymm9[13],ymm11[14],ymm9[14],ymm11[15],ymm9[15]
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm2
-; AVX512-NEXT: movw $18724, %ax # imm = 0x4924
-; AVX512-NEXT: kmovw %eax, %k1
-; AVX512-NEXT: vmovdqa32 %zmm0, %zmm2 {%k1}
-; AVX512-NEXT: vextracti64x4 $1, %zmm2, %ymm0
-; AVX512-NEXT: vmovdqa 32(%r8), %ymm12
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31]
-; AVX512-NEXT: vpshufb %ymm1, %ymm12, %ymm3
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6],ymm3[7]
-; AVX512-NEXT: vpshuflw {{.*#+}} ymm3 = ymm12[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm16
-; AVX512-NEXT: vmovdqa (%rcx), %ymm2
-; AVX512-NEXT: vpsrldq {{.*#+}} ymm0 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vmovdqa (%rdx), %ymm3
-; AVX512-NEXT: vpsrldq {{.*#+}} ymm5 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
-; AVX512-NEXT: vpunpckhwd {{....
[truncated]
``````````
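
On the AVX512 path in the first hunk, the merged mask is bitcast to a vNi1 vector and drives an ordinary select over the two concatenated operands. A scalar sketch of those lane semantics (illustration only; the helper name is made up, not LLVM API):

```cpp
#include <array>
#include <cassert>
#include <cstdint>

// Scalar model of the DAG.getSelect(Sel, Concat1, Concat0) emitted on the
// AVX512 path: a set bit i in the merged blend mask picks lane i of the
// second (concatenated) operand, a clear bit keeps the first operand's lane.
template <typename T, size_t N>
std::array<T, N> blendSelect(uint64_t Mask, const std::array<T, N> &Op0,
                             const std::array<T, N> &Op1) {
  std::array<T, N> R{};
  for (size_t I = 0; I != N; ++I)
    R[I] = ((Mask >> I) & 1) ? Op1[I] : Op0[I];
  return R;
}

int main() {
  // v8i32 example: immediate 0b10100101 selects lanes 0, 2, 5, 7 from Op1.
  std::array<int, 8> A{0, 1, 2, 3, 4, 5, 6, 7};
  std::array<int, 8> B{10, 11, 12, 13, 14, 15, 16, 17};
  assert((blendSelect<int, 8>(0b10100101, A, B) ==
          std::array<int, 8>{10, 1, 12, 3, 4, 15, 6, 17}));
  return 0;
}
```
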
https://github.com/llvm/llvm-project/pull/131121
More information about the llvm-commits mailing list