[llvm] 4b38310 - [X86] combineConcatVectorOps - support concatenation of 128->256-bit X86ISD::PCMPEQ/PCMPGT nodes on AVX2+ targets
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 29 06:33:44 PDT 2023
Author: Simon Pilgrim
Date: 2023-08-29T14:33:22+01:00
New Revision: 4b383107fa7585bb5ecd7f03cab7800b33d1585a
URL: https://github.com/llvm/llvm-project/commit/4b383107fa7585bb5ecd7f03cab7800b33d1585a
DIFF: https://github.com/llvm/llvm-project/commit/4b383107fa7585bb5ecd7f03cab7800b33d1585a.diff
LOG: [X86] combineConcatVectorOps - support concatenation of 128->256-bit X86ISD::PCMPEQ/PCMPGT nodes on AVX2+ targets
Only bother if one of the operands is free to concat (most likely a compare with a constant).
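For illustration only (this sketch is not from the patch; the committed test coverage is in widen_bitcnt.ll), here is a minimal, hypothetical .ll example of the kind of pattern the new combine targets when compiled with -mattr=+avx2. Both 128-bit compares are against a zero splat, so concatenating that operand is free:

  define <8 x i32> @concat_cmp_zero(<4 x i32> %a0, <4 x i32> %a1) {
    ; Two 128-bit equality compares against zero...
    %c0 = icmp eq <4 x i32> %a0, zeroinitializer
    %c1 = icmp eq <4 x i32> %a1, zeroinitializer
    %s0 = sext <4 x i1> %c0 to <4 x i32>
    %s1 = sext <4 x i1> %c1 to <4 x i32>
    ; ...whose results are concatenated into a single 256-bit vector.
    %r = shufflevector <4 x i32> %s0, <4 x i32> %s1,
                       <8 x i32> <i32 0, i32 1, i32 2, i32 3,
                                  i32 4, i32 5, i32 6, i32 7>
    ret <8 x i32> %r
  }

On AVX2 targets the concatenation of the two X86ISD::PCMPEQ nodes can now fold into a single 256-bit vpcmpeqd against a ymm zero, rather than two xmm compares followed by a vinserti128, which is the same effect visible in the widen_bitcnt.ll diff below.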
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/widen_bitcnt.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 4d0fe3e24cb570..0f6f90c7907cf2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -54668,6 +54668,15 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
ConcatSubOperand(SrcVT, Ops, 1));
}
break;
+ case X86ISD::PCMPEQ:
+ case X86ISD::PCMPGT:
+ if (!IsSplat && VT.is256BitVector() && Subtarget.hasInt256() &&
+ (IsConcatFree(VT, Ops, 0) || IsConcatFree(VT, Ops, 1))) {
+ return DAG.getNode(Op0.getOpcode(), DL, VT,
+ ConcatSubOperand(VT, Ops, 0),
+ ConcatSubOperand(VT, Ops, 1));
+ }
+ break;
case ISD::CTPOP:
case ISD::CTTZ:
case ISD::CTLZ:
diff --git a/llvm/test/CodeGen/X86/widen_bitcnt.ll b/llvm/test/CodeGen/X86/widen_bitcnt.ll
index aa7fed6c9f053d..50f19d31183e5e 100644
--- a/llvm/test/CodeGen/X86/widen_bitcnt.ll
+++ b/llvm/test/CodeGen/X86/widen_bitcnt.ll
@@ -632,67 +632,46 @@ define <8 x i32> @widen_ctlz_v2i32_v8i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>
; AVX2: # %bb.0:
; AVX2-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm5
-; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX2-NEXT: vpand %xmm6, %xmm5, %xmm5
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm7
-; AVX2-NEXT: vpsrlw $4, %xmm1, %xmm8
-; AVX2-NEXT: vpand %xmm6, %xmm8, %xmm8
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm8, %xmm8
-; AVX2-NEXT: vpsrlw $4, %xmm2, %xmm9
-; AVX2-NEXT: vpand %xmm6, %xmm9, %xmm9
-; AVX2-NEXT: vpsrlw $4, %xmm3, %xmm10
-; AVX2-NEXT: vpand %xmm6, %xmm10, %xmm6
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm6, %xmm6
-; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm8, %ymm6
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm8
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; AVX2-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX2-NEXT: vpshufb %ymm8, %ymm10, %ymm11
-; AVX2-NEXT: vpand %ymm6, %ymm11, %ymm6
-; AVX2-NEXT: vpsrlw $4, %ymm8, %ymm8
-; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm11 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX2-NEXT: vpand %ymm11, %ymm8, %ymm8
-; AVX2-NEXT: vpshufb %ymm8, %ymm10, %ymm8
-; AVX2-NEXT: vpaddb %ymm6, %ymm8, %ymm6
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm3, %xmm8
-; AVX2-NEXT: vinserti128 $1, %xmm8, %ymm7, %ymm7
-; AVX2-NEXT: vpsrlw $8, %ymm7, %ymm7
-; AVX2-NEXT: vpand %ymm7, %ymm6, %ymm7
-; AVX2-NEXT: vpsrlw $8, %ymm6, %ymm6
-; AVX2-NEXT: vpaddw %ymm7, %ymm6, %ymm6
-; AVX2-NEXT: vpcmpeqw %xmm4, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpeqw %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
-; AVX2-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm3
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm1, %ymm3, %ymm4
+; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm5
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vpxor %xmm7, %xmm7, %xmm7
+; AVX2-NEXT: vpcmpeqb %ymm7, %ymm5, %ymm8
+; AVX2-NEXT: vpand %ymm4, %ymm8, %ymm4
+; AVX2-NEXT: vpshufb %ymm5, %ymm3, %ymm5
+; AVX2-NEXT: vpaddb %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpcmpeqb %ymm7, %ymm1, %ymm5
+; AVX2-NEXT: vpsrlw $8, %ymm5, %ymm5
+; AVX2-NEXT: vpand %ymm5, %ymm4, %ymm5
+; AVX2-NEXT: vpsrlw $8, %ymm4, %ymm4
+; AVX2-NEXT: vpaddw %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpcmpeqw %ymm7, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX2-NEXT: vpand %ymm1, %ymm6, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm6, %ymm6
-; AVX2-NEXT: vpaddd %ymm1, %ymm6, %ymm1
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm6
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm5, %xmm5
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm9, %xmm7
-; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5
-; AVX2-NEXT: vpcmpeqw %xmm4, %xmm2, %xmm7
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm2, %xmm4
+; AVX2-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
+; AVX2-NEXT: vpaddd %ymm1, %ymm4, %ymm1
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm0, %ymm10, %ymm2
+; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX2-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpcmpeqb %ymm7, %ymm4, %ymm5
; AVX2-NEXT: vpand %ymm5, %ymm2, %ymm2
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm0, %ymm11, %ymm0
-; AVX2-NEXT: vpshufb %ymm0, %ymm10, %ymm0
-; AVX2-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm6, %ymm2
+; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpaddb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqb %ymm7, %ymm0, %ymm3
+; AVX2-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm3
; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm3, %ymm2
-; AVX2-NEXT: vpsrld $16, %ymm2, %ymm2
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqw %ymm7, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT: retq
;
@@ -1008,67 +987,46 @@ define <8 x i32> @widen_ctlz_undef_v2i32_v8i32(<2 x i32> %a0, <2 x i32> %a1, <2
; AVX2: # %bb.0:
; AVX2-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm5
-; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX2-NEXT: vpand %xmm6, %xmm5, %xmm5
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm7
-; AVX2-NEXT: vpsrlw $4, %xmm1, %xmm8
-; AVX2-NEXT: vpand %xmm6, %xmm8, %xmm8
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm8, %xmm8
-; AVX2-NEXT: vpsrlw $4, %xmm2, %xmm9
-; AVX2-NEXT: vpand %xmm6, %xmm9, %xmm9
-; AVX2-NEXT: vpsrlw $4, %xmm3, %xmm10
-; AVX2-NEXT: vpand %xmm6, %xmm10, %xmm6
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm6, %xmm6
-; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm8, %ymm6
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm8
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; AVX2-NEXT: # ymm10 = mem[0,1,0,1]
-; AVX2-NEXT: vpshufb %ymm8, %ymm10, %ymm11
-; AVX2-NEXT: vpand %ymm6, %ymm11, %ymm6
-; AVX2-NEXT: vpsrlw $4, %ymm8, %ymm8
-; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm11 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX2-NEXT: vpand %ymm11, %ymm8, %ymm8
-; AVX2-NEXT: vpshufb %ymm8, %ymm10, %ymm8
-; AVX2-NEXT: vpaddb %ymm6, %ymm8, %ymm6
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm3, %xmm8
-; AVX2-NEXT: vinserti128 $1, %xmm8, %ymm7, %ymm7
-; AVX2-NEXT: vpsrlw $8, %ymm7, %ymm7
-; AVX2-NEXT: vpand %ymm7, %ymm6, %ymm7
-; AVX2-NEXT: vpsrlw $8, %ymm6, %ymm6
-; AVX2-NEXT: vpaddw %ymm7, %ymm6, %ymm6
-; AVX2-NEXT: vpcmpeqw %xmm4, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpeqw %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
-; AVX2-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm3
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX2-NEXT: vpshufb %ymm1, %ymm3, %ymm4
+; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm5
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT: vpand %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vpxor %xmm7, %xmm7, %xmm7
+; AVX2-NEXT: vpcmpeqb %ymm7, %ymm5, %ymm8
+; AVX2-NEXT: vpand %ymm4, %ymm8, %ymm4
+; AVX2-NEXT: vpshufb %ymm5, %ymm3, %ymm5
+; AVX2-NEXT: vpaddb %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpcmpeqb %ymm7, %ymm1, %ymm5
+; AVX2-NEXT: vpsrlw $8, %ymm5, %ymm5
+; AVX2-NEXT: vpand %ymm5, %ymm4, %ymm5
+; AVX2-NEXT: vpsrlw $8, %ymm4, %ymm4
+; AVX2-NEXT: vpaddw %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpcmpeqw %ymm7, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX2-NEXT: vpand %ymm1, %ymm6, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm6, %ymm6
-; AVX2-NEXT: vpaddd %ymm1, %ymm6, %ymm1
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm6
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm5, %xmm5
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm9, %xmm7
-; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm5, %ymm5
-; AVX2-NEXT: vpcmpeqw %xmm4, %xmm2, %xmm7
-; AVX2-NEXT: vpcmpeqb %xmm4, %xmm2, %xmm4
+; AVX2-NEXT: vpand %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
+; AVX2-NEXT: vpaddd %ymm1, %ymm4, %ymm1
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm0, %ymm10, %ymm2
+; AVX2-NEXT: vpshufb %ymm0, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX2-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpcmpeqb %ymm7, %ymm4, %ymm5
; AVX2-NEXT: vpand %ymm5, %ymm2, %ymm2
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm0, %ymm11, %ymm0
-; AVX2-NEXT: vpshufb %ymm0, %ymm10, %ymm0
-; AVX2-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm6, %ymm2
+; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpaddb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqb %ymm7, %ymm0, %ymm3
+; AVX2-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm3
; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm3, %ymm2
-; AVX2-NEXT: vpsrld $16, %ymm2, %ymm2
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqw %ymm7, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpsrld $16, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT: retq
;