[llvm] 64eea34 - [X86] combineEXTEND_VECTOR_INREG - don't attempt to shuffle combine ANY_EXTEND_VECTOR_INREG without SSE41
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 13 09:42:12 PDT 2022
Author: Simon Pilgrim
Date: 2022-06-13T17:42:04+01:00
New Revision: 64eea34420e3ee1f6f91a57a72b2edf6f65d6ea7
URL: https://github.com/llvm/llvm-project/commit/64eea34420e3ee1f6f91a57a72b2edf6f65d6ea7
DIFF: https://github.com/llvm/llvm-project/commit/64eea34420e3ee1f6f91a57a72b2edf6f65d6ea7.diff
LOG: [X86] combineEXTEND_VECTOR_INREG - don't attempt to shuffle combine ANY_EXTEND_VECTOR_INREG without SSE41
Without SSE41, ANY_EXTEND_VECTOR_INREG nodes are likely to be prematurely combined into a target shuffle, preventing generic sign-extension folds.
Fixes a number of sign-extend regressions in D127115.
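For illustration only (not part of this commit, and the function name is hypothetical): a minimal IR sketch of the kind of sign-extension pattern the guard helps on pre-SSE41 targets. Keeping the extension generic lets the sign-extended multiply fold to a single pmaddwd, as the madd.ll diff below shows.

define <4 x i32> @sketch_pmaddwd(<8 x i16>* %a, <8 x i16>* %b) {
  %A = load <8 x i16>, <8 x i16>* %a
  %B = load <8 x i16>, <8 x i16>* %b
  %sA = sext <8 x i16> %A to <8 x i32>   ; generic sign extension to be folded
  %sB = sext <8 x i16> %B to <8 x i32>
  %m = mul <8 x i32> %sA, %sB
  %even = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %odd = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %r = add <4 x i32> %even, %odd         ; even/odd pairwise add of products
  ret <4 x i32> %r
}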
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/is_fpclass.ll
llvm/test/CodeGen/X86/madd.ll
llvm/test/CodeGen/X86/mulvi32.ll
llvm/test/CodeGen/X86/pmaddubsw.ll
llvm/test/CodeGen/X86/promote-cmp.ll
llvm/test/CodeGen/X86/vector-trunc-math.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index edf36a746a776..15bee12580483 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -54215,10 +54215,10 @@ static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
return DAG.getBitcast(VT, DAG.getBuildVector(In.getValueType(), DL, Elts));
}
- // Attempt to combine as a shuffle.
- // TODO: General ZERO_EXTEND_VECTOR_INREG support.
- if (Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
- (Opcode == ISD::ZERO_EXTEND_VECTOR_INREG && Subtarget.hasSSE41())) {
+ // Attempt to combine as a shuffle on SSE41+ targets.
+ if ((Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
+ Opcode == ISD::ZERO_EXTEND_VECTOR_INREG) &&
+ Subtarget.hasSSE41()) {
SDValue Op(N, 0);
if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
diff --git a/llvm/test/CodeGen/X86/is_fpclass.ll b/llvm/test/CodeGen/X86/is_fpclass.ll
index 6ef60023eac7b..adf127040117b 100644
--- a/llvm/test/CodeGen/X86/is_fpclass.ll
+++ b/llvm/test/CodeGen/X86/is_fpclass.ll
@@ -835,9 +835,9 @@ define <2 x i1> @isnan_v2f_strictfp(<2 x float> %x) strictfp {
;
; CHECK-64-LABEL: isnan_v2f_strictfp:
; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; CHECK-64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-64-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; CHECK-64-NEXT: retq
entry:
%0 = tail call <2 x i1> @llvm.is.fpclass.v2f32(<2 x float> %x, i32 3) ; "nan"
diff --git a/llvm/test/CodeGen/X86/madd.ll b/llvm/test/CodeGen/X86/madd.ll
index 9012cd72a94e3..f58beed3fe88f 100644
--- a/llvm/test/CodeGen/X86/madd.ll
+++ b/llvm/test/CodeGen/X86/madd.ll
@@ -2570,25 +2570,9 @@ define <4 x i32> @pmaddwd_swapped_indices(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
define <4 x i32> @pmaddwd_bad_indices(<8 x i16>* %Aptr, <8 x i16>* %Bptr) {
; SSE2-LABEL: pmaddwd_bad_indices:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: movdqa (%rsi), %xmm2
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[2,1,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: psrld $16, %xmm2
-; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,7,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: pmaddwd %xmm3, %xmm0
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = mem[1,0,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,6,7]
+; SSE2-NEXT: pmaddwd (%rsi), %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: pmaddwd_bad_indices:
diff --git a/llvm/test/CodeGen/X86/mulvi32.ll b/llvm/test/CodeGen/X86/mulvi32.ll
index 388b5bc8a746c..226ec4f63a1b8 100644
--- a/llvm/test/CodeGen/X86/mulvi32.ll
+++ b/llvm/test/CodeGen/X86/mulvi32.ll
@@ -137,8 +137,8 @@ define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/pmaddubsw.ll b/llvm/test/CodeGen/X86/pmaddubsw.ll
index 7229fd6bc8557..33f20edaa139d 100644
--- a/llvm/test/CodeGen/X86/pmaddubsw.ll
+++ b/llvm/test/CodeGen/X86/pmaddubsw.ll
@@ -303,20 +303,17 @@ define <8 x i16> @pmaddubsw_bad_extend(<16 x i8>* %Aptr, <16 x i8>* %Bptr) {
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psraw $8, %xmm3
; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: pshufb {{.*#+}} xmm4 = xmm4[0],zero,xmm4[2],zero,xmm4[4],zero,xmm4[6],zero,xmm4[u,u,u,u,u,u,u,u]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: psrlw $8, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE-NEXT: pmaddwd %xmm4, %xmm0
-; SSE-NEXT: pshufb {{.*#+}} xmm2 = xmm2[8],zero,xmm2[10],zero,xmm2[12],zero,xmm2[14],zero,xmm2[u,u,u,u,u,u,u,u]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; SSE-NEXT: pshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,8],zero,xmm4[10],zero,xmm4[12],zero,xmm4[14],zero
+; SSE-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE-NEXT: psrlw $8, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE-NEXT: pmaddwd %xmm4, %xmm5
+; SSE-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0],zero,xmm2[2],zero,xmm2[4],zero,xmm2[6],zero,xmm2[u,u,u,u,u,u,u,u]
; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE-NEXT: pshufb {{.*#+}} xmm1 = xmm1[9],zero,xmm1[11],zero,xmm1[13],zero,xmm1[15],zero,xmm1[u,u,u,u,u,u,u,u]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
-; SSE-NEXT: pmaddwd %xmm2, %xmm6
-; SSE-NEXT: packssdw %xmm6, %xmm0
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE-NEXT: pmaddwd %xmm2, %xmm0
+; SSE-NEXT: packssdw %xmm5, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: pmaddubsw_bad_extend:
diff --git a/llvm/test/CodeGen/X86/promote-cmp.ll b/llvm/test/CodeGen/X86/promote-cmp.ll
index 8cb33e9433a8d..3d1041cd78e36 100644
--- a/llvm/test/CodeGen/X86/promote-cmp.ll
+++ b/llvm/test/CodeGen/X86/promote-cmp.ll
@@ -27,15 +27,16 @@ define <4 x i64> @PR45808(<4 x i64> %0, <4 x i64> %1) {
; SSE2-NEXT: andps %xmm10, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm8[1,3]
; SSE2-NEXT: orps %xmm4, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,1,3,3]
-; SSE2-NEXT: psllq $63, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm4
-; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,1,3,3]
+; SSE2-NEXT: psllq $63, %xmm6
+; SSE2-NEXT: psrad $31, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pandn %xmm3, %xmm6
+; SSE2-NEXT: por %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,1,1,3]
-; SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm3
; SSE2-NEXT: psllq $63, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll
index f9d7c4e083450..b81695545cfe8 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll
@@ -4635,10 +4635,10 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
; SSE-LABEL: mul_add_const_v4i64_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
; SSE-NEXT: pmuludq %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -4661,10 +4661,10 @@ define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwin
; SSE-LABEL: mul_add_self_v4i64_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
; SSE-NEXT: pmuludq %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: paddd %xmm0, %xmm0
@@ -4687,10 +4687,10 @@ define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nou
; SSE-LABEL: mul_add_multiuse_v4i64_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,1,3]
; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm1[0,2]
; SSE-NEXT: paddd %xmm4, %xmm0