[llvm] 4b529f8 - [X86] Fold extractsubvector(permv3(src0, mask, src1), c) -> extractsubvector(permv3(src0,widensubvector(extractsubvector(mask,c)),src1),0) iff c != 0
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 18 04:08:51 PDT 2024
Author: Simon Pilgrim
Date: 2024-09-18T12:06:08+01:00
New Revision: 4b529f840c7a28245f4462d9fde34f1686e96351
URL: https://github.com/llvm/llvm-project/commit/4b529f840c7a28245f4462d9fde34f1686e96351
DIFF: https://github.com/llvm/llvm-project/commit/4b529f840c7a28245f4462d9fde34f1686e96351.diff
LOG: [X86] Fold extractsubvector(permv3(src0,mask,src1),c) -> extractsubvector(permv3(src0,widensubvector(extractsubvector(mask,c)),src1),0) iff c != 0
For cross-lane shuffles, extract the (upper) subvector of the mask operand directly, and make use of the free implicit extraction of the lowest subvector of the result.
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9bc5f2c9399574..182f6c08366a99 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -57791,6 +57791,19 @@ static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
DAG.getTargetConstant(M, DL, MVT::i8));
}
break;
+ case X86ISD::VPERMV3:
+ if (IdxVal != 0) {
+ SDValue Src0 = InVec.getOperand(0);
+ SDValue Mask = InVec.getOperand(1);
+ SDValue Src1 = InVec.getOperand(2);
+ Mask = extractSubVector(Mask, IdxVal, DAG, DL, SizeInBits);
+ Mask = widenSubVector(Mask, /*ZeroNewElements=*/false, Subtarget, DAG,
+ DL, InSizeInBits);
+ SDValue Shuffle =
+ DAG.getNode(InOpcode, DL, InVecVT, Src0, Mask, Src1);
+ return extractSubVector(Shuffle, 0, DAG, DL, SizeInBits);
+ }
+ break;
}
}
}
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
index ed316990e48666..f616eafc24272e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
@@ -240,21 +240,17 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm6
; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [13,4,6,7,13,4,6,7]
-; AVX512-FCP-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
; AVX512-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [6,13,6,7,6,13,6,7]
-; AVX512-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm8, %xmm1
; AVX512-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512-FCP-NEXT: vmovq %xmm3, (%rdx)
; AVX512-FCP-NEXT: vmovq %xmm4, (%rcx)
; AVX512-FCP-NEXT: vmovq %xmm5, (%r8)
; AVX512-FCP-NEXT: vmovq %xmm0, (%r9)
; AVX512-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512-FCP-NEXT: vmovq %xmm1, (%rax)
+; AVX512-FCP-NEXT: vmovq %xmm8, (%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -309,21 +305,17 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm6
; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [13,4,6,7,13,4,6,7]
-; AVX512DQ-FCP-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
; AVX512DQ-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [6,13,6,7,6,13,6,7]
-; AVX512DQ-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm8, %xmm1
; AVX512DQ-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512DQ-FCP-NEXT: vmovq %xmm3, (%rdx)
; AVX512DQ-FCP-NEXT: vmovq %xmm4, (%rcx)
; AVX512DQ-FCP-NEXT: vmovq %xmm5, (%r8)
; AVX512DQ-FCP-NEXT: vmovq %xmm0, (%r9)
; AVX512DQ-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512DQ-FCP-NEXT: vmovq %xmm1, (%rax)
+; AVX512DQ-FCP-NEXT: vmovq %xmm8, (%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
@@ -378,21 +370,17 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [13,4,6,7,13,4,6,7]
-; AVX512BW-FCP-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
; AVX512BW-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
-; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [6,13,6,7,6,13,6,7]
-; AVX512BW-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
-; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm8, %xmm1
; AVX512BW-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512BW-FCP-NEXT: vmovq %xmm3, (%rdx)
; AVX512BW-FCP-NEXT: vmovq %xmm4, (%rcx)
; AVX512BW-FCP-NEXT: vmovq %xmm5, (%r8)
; AVX512BW-FCP-NEXT: vmovq %xmm0, (%r9)
; AVX512BW-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512BW-FCP-NEXT: vmovq %xmm1, (%rax)
+; AVX512BW-FCP-NEXT: vmovq %xmm8, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -447,21 +435,17 @@ define void @load_i32_stride7_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm6
; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [13,4,6,7,13,4,6,7]
-; AVX512DQ-BW-FCP-NEXT: # ymm7 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm7 = [13,4,6,7]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm6, %ymm1, %ymm7
-; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [6,13,6,7,6,13,6,7]
-; AVX512DQ-BW-FCP-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm8 = [6,13,6,7]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm6, %ymm8
-; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm8, %xmm1
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, (%rsi)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm3, (%rdx)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm4, (%rcx)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm5, (%r8)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, (%r9)
; AVX512DQ-BW-FCP-NEXT: vmovq %xmm7, (%r10)
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm1, (%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm8, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%wide.vec = load <14 x i32>, ptr %in.vec, align 64
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
index 2fd173c729170b..872a8d00cc2343 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
@@ -226,10 +226,8 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512-FCP-NEXT: vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,5,5,13,5,5]
-; AVX512-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,13,5,5]
; AVX512-FCP-NEXT: vpermi2d %ymm1, %ymm4, %ymm6
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512-FCP-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
@@ -293,10 +291,8 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512DQ-FCP-NEXT: vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,5,5,13,5,5]
-; AVX512DQ-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,13,5,5]
; AVX512DQ-FCP-NEXT: vpermi2d %ymm1, %ymm4, %ymm6
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512DQ-FCP-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
@@ -360,10 +356,8 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512BW-FCP-NEXT: vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,5,5,13,5,5]
-; AVX512BW-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,13,5,5]
; AVX512BW-FCP-NEXT: vpermi2d %ymm1, %ymm4, %ymm6
-; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512BW-FCP-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512BW-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
@@ -427,10 +421,8 @@ define void @load_i32_stride8_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512DQ-BW-FCP-NEXT: vpunpckldq {{.*#+}} ymm5 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[4],ymm1[4],ymm4[5],ymm1[5]
; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [5,13,5,5,5,13,5,5]
-; AVX512DQ-BW-FCP-NEXT: # ymm6 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} xmm6 = [5,13,5,5]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %ymm1, %ymm4, %ymm6
-; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm6, %xmm6
; AVX512DQ-BW-FCP-NEXT: vpunpckhdq {{.*#+}} ymm1 = ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[6],ymm1[6],ymm4[7],ymm1[7]
; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX512DQ-BW-FCP-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
More information about the llvm-commits
mailing list