On 26/09/2021 01:53, Craig Topper wrote:
> Why doesn't this work for wider vectors than 128?

I'm still looking at this - it currently interferes with horizontal-add
pattern matching, but should be fixable with an
add(pmaddwd,pmaddwd) -> pmaddwd fold (see the sketch below; there's also
a short note on the sext -> zext change itself after the quoted diff).
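
For reference, the identity such a fold relies on: the pmaddwd nodes this
combine creates have both operands zero-extended from i16, so the odd
(high) i16 word of each i32 lane is zero, every lane is a single signed
16x16 product, and two such nodes can be merged by shuffling the second
pair of operands into the spare high words. A standalone scalar model of
one lane that checks this (illustrative only - pmaddwd_lane and pack are
names invented for this sketch, not SelectionDAG code):

// Illustrative only: one i32 lane of PMADDWD, used to check the identity
// behind an add(pmaddwd,pmaddwd) -> pmaddwd fold when the high i16 words
// of all operands are known zero. Assumes two's complement int32_t.
#include <cassert>
#include <cstdint>

// One i32 lane of PMADDWD: two signed 16x16 products summed with i32
// wraparound (modelled with unsigned arithmetic to avoid signed overflow).
static int32_t pmaddwd_lane(uint32_t a, uint32_t b) {
  int32_t lo = int16_t(a & 0xFFFF) * int16_t(b & 0xFFFF);
  int32_t hi = int16_t(a >> 16) * int16_t(b >> 16);
  return int32_t(uint32_t(lo) + uint32_t(hi));
}

// Pack two i16 values into one i32 lane (lo in the low word, hi above it).
static uint32_t pack(int16_t lo, int16_t hi) {
  return uint16_t(lo) | uint32_t(uint16_t(hi)) << 16;
}

int main() {
  const int16_t vals[] = {-32768, -3, -1, 0, 1, 7, 32767};
  for (int16_t x : vals)
    for (int16_t y : vals)
      for (int16_t z : vals)
        for (int16_t w : vals) {
          // add(pmaddwd(zext x, zext y), pmaddwd(zext z, zext w))...
          int32_t sum =
              int32_t(uint32_t(pmaddwd_lane(pack(x, 0), pack(y, 0))) +
                      uint32_t(pmaddwd_lane(pack(z, 0), pack(w, 0))));
          // ...equals a single pmaddwd with the second operand pair
          // shuffled into the previously-zero high words.
          assert(sum == pmaddwd_lane(pack(x, z), pack(y, w)));
        }
  return 0;
}

The actual fold would sit alongside the existing PMADDWD combines in
X86ISelLowering.cpp and prove the high words zero via known-bits; the
sketch only checks the lane arithmetic.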

> ~Craig
>
> On Sat, Sep 25, 2021 at 7:57 AM Simon Pilgrim via llvm-commits
> <llvm-commits@lists.llvm.org> wrote:
>>
>> Author: Simon Pilgrim
>> Date: 2021-09-25T15:50:45+01:00
>> New Revision: 2a4fa0c27c938b9767dd42d57cc7c4e5a670b302
>>
>> URL: https://github.com/llvm/llvm-project/commit/2a4fa0c27c938b9767dd42d57cc7c4e5a670b302
>> DIFF: https://github.com/llvm/llvm-project/commit/2a4fa0c27c938b9767dd42d57cc7c4e5a670b302.diff
>>
>> LOG: [X86][SSE] combineMulToPMADDWD - enable sext(v8i16) -> zext(v8i16) fold on sub-128 bit vectors
>>
>> Added:
>>
>> Modified:
>>     llvm/lib/Target/X86/X86ISelLowering.cpp
>>     llvm/test/CodeGen/X86/shrink_vmul.ll
>>
>> Removed:
>>
>>
>> ################################################################################
>> diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
>> index a3b5640c1a623..702d16731d22f 100644
>> --- a/llvm/lib/Target/X86/X86ISelLowering.cpp
>> +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
>> @@ -44266,7 +44266,7 @@ static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
>>        return Op;
>>      // Convert sext(vXi16) to zext(vXi16).
>>      // TODO: Handle sext from smaller types as well?
>> -    if (Op.getOpcode() == ISD::SIGN_EXTEND && VT.is128BitVector() &&
>> +    if (Op.getOpcode() == ISD::SIGN_EXTEND && VT.getSizeInBits() <= 128 &&
>>          N->isOnlyUserOf(Op.getNode())) {
>>        SDValue Src = Op.getOperand(0);
>>        if (Src.getScalarValueSizeInBits() == 16)
>>
>> diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll
>> index 900f6a08076a5..3e1688635a5f3 100644
>> --- a/llvm/test/CodeGen/X86/shrink_vmul.ll
>> +++ b/llvm/test/CodeGen/X86/shrink_vmul.ll
>> @@ -1078,10 +1078,10 @@ define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b
>>  ; X86-SSE-NEXT:    movl c, %esi
>>  ; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
>>  ; X86-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
>> -; X86-SSE-NEXT:    movdqa %xmm1, %xmm2
>> -; X86-SSE-NEXT:    pmulhw %xmm0, %xmm2
>> -; X86-SSE-NEXT:    pmullw %xmm0, %xmm1
>> +; X86-SSE-NEXT:    pxor %xmm2, %xmm2
>> +; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
>>  ; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
>> +; X86-SSE-NEXT:    pmaddwd %xmm0, %xmm1
>>  ; X86-SSE-NEXT:    movq %xmm1, (%esi,%ecx,4)
>>  ; X86-SSE-NEXT:    popl %esi
>>  ; X86-SSE-NEXT:    retl
>> @@ -1094,10 +1094,10 @@ define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b
>>  ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edx
>>  ; X86-AVX-NEXT:    movl c, %esi
>>  ; X86-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
>> -; X86-AVX-NEXT:    vpmovsxwd %xmm0, %xmm0
>>  ; X86-AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
>> -; X86-AVX-NEXT:    vpmovsxwd %xmm1, %xmm1
>> -; X86-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
>> +; X86-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
>> +; X86-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
>> +; X86-AVX-NEXT:    vpmaddwd %xmm0, %xmm1, %xmm0
>>  ; X86-AVX-NEXT:    vmovq %xmm0, (%esi,%ecx,4)
>>  ; X86-AVX-NEXT:    popl %esi
>>  ; X86-AVX-NEXT:    retl
>> @@ -1107,10 +1107,10 @@ define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b
>>  ; X64-SSE-NEXT:    movq c(%rip), %rax
>>  ; X64-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
>>  ; X64-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
>> -; X64-SSE-NEXT:    movdqa %xmm1, %xmm2
>> -; X64-SSE-NEXT:    pmulhw %xmm0, %xmm2
>> -; X64-SSE-NEXT:    pmullw %xmm0, %xmm1
>> +; X64-SSE-NEXT:    pxor %xmm2, %xmm2
>> +; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
>>  ; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
>> +; X64-SSE-NEXT:    pmaddwd %xmm0, %xmm1
>>  ; X64-SSE-NEXT:    movq %xmm1, (%rax,%rdx,4)
>>  ; X64-SSE-NEXT:    retq
>>  ;
>> @@ -1118,10 +1118,10 @@ define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b
>>  ; X64-AVX:       # %bb.0: # %entry
>>  ; X64-AVX-NEXT:    movq c(%rip), %rax
>>  ; X64-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
>> -; X64-AVX-NEXT:    vpmovsxwd %xmm0, %xmm0
>>  ; X64-AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
>> -; X64-AVX-NEXT:    vpmovsxwd %xmm1, %xmm1
>> -; X64-AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
>> +; X64-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
>> +; X64-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
>> +; X64-AVX-NEXT:    vpmaddwd %xmm0, %xmm1, %xmm0
>>  ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rdx,4)
>>  ; X64-AVX-NEXT:    retq
>>  entry:
>> @@ -1874,11 +1874,9 @@ define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
>>  ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
>>  ; X86-SSE-NEXT:    movl c, %edx
>>  ; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
>> -; X86-SSE-NEXT:    movdqa {{.*#+}} xmm1 = <32768,32767,u,u,u,u,u,u>
>> -; X86-SSE-NEXT:    movdqa %xmm0, %xmm2
>> -; X86-SSE-NEXT:    pmulhw %xmm1, %xmm2
>> -; X86-SSE-NEXT:    pmullw %xmm1, %xmm0
>> -; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
>> +; X86-SSE-NEXT:    pxor %xmm1, %xmm1
>> +; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
>> +; X86-SSE-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
>>  ; X86-SSE-NEXT:    movq %xmm0, (%edx,%eax,4)
>>  ; X86-SSE-NEXT:    retl
>>  ;
>> @@ -1888,8 +1886,8 @@ define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
>>  ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
>>  ; X86-AVX-NEXT:    movl c, %edx
>>  ; X86-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
>> -; X86-AVX-NEXT:    vpmovsxwd %xmm0, %xmm0
>> -; X86-AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
>> +; X86-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
>> +; X86-AVX-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
>>  ; X86-AVX-NEXT:    vmovq %xmm0, (%edx,%eax,4)
>>  ; X86-AVX-NEXT:    retl
>>  ;
>> @@ -1897,11 +1895,9 @@ define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
>>  ; X64-SSE:       # %bb.0: # %entry
>>  ; X64-SSE-NEXT:    movq c(%rip), %rax
>>  ; X64-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
>> -; X64-SSE-NEXT:    movdqa {{.*#+}} xmm1 = <32768,32767,u,u,u,u,u,u>
>> -; X64-SSE-NEXT:    movdqa %xmm0, %xmm2
>> -; X64-SSE-NEXT:    pmulhw %xmm1, %xmm2
>> -; X64-SSE-NEXT:    pmullw %xmm1, %xmm0
>> -; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
>> +; X64-SSE-NEXT:    pxor %xmm1, %xmm1
>> +; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
>> +; X64-SSE-NEXT:    pmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
>>  ; X64-SSE-NEXT:    movq %xmm0, (%rax,%rsi,4)
>>  ; X64-SSE-NEXT:    retq
>>  ;
>> @@ -1909,8 +1905,8 @@ define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
>>  ; X64-AVX:       # %bb.0: # %entry
>>  ; X64-AVX-NEXT:    movq c(%rip), %rax
>>  ; X64-AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
>> -; X64-AVX-NEXT:    vpmovsxwd %xmm0, %xmm0
>> -; X64-AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
>> +; X64-AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
>> +; X64-AVX-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
>>  ; X64-AVX-NEXT:    vmovq %xmm0, (%rax,%rsi,4)
>>  ; X64-AVX-NEXT:    retq
>>  entry:
>>
>>
>> _______________________________________________
>> llvm-commits mailing list
>> llvm-commits@lists.llvm.org
>> https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits
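
A note on why the sext(vXi16) -> zext(vXi16) flip in the committed change
is safe: both mul operands are sign-extensions from i16, so each i32 lane
of the mul is the exact signed 16x16 product of the low words (it fits in
32 bits), and a pmaddwd lane computes exactly that once the high words are
zero - which zext gives us without disturbing the low words, where sext
would leave sign-replicated high words and a spurious hi*hi term. A
standalone scalar check (again illustrative only, not the combine code):

// Illustrative only: why combineMulToPMADDWD can form PMADDWD from
// zero-extended inputs in place of an i32 mul of sign-extended i16s.
// Assumes two's complement int32_t.
#include <cassert>
#include <cstdint>

// One i32 lane of PMADDWD: two signed 16x16 products summed with i32
// wraparound (modelled with unsigned arithmetic to avoid signed overflow).
static int32_t pmaddwd_lane(uint32_t a, uint32_t b) {
  int32_t lo = int16_t(a & 0xFFFF) * int16_t(b & 0xFFFF);
  int32_t hi = int16_t(a >> 16) * int16_t(b >> 16);
  return int32_t(uint32_t(lo) + uint32_t(hi));
}

int main() {
  const int16_t vals[] = {-32768, -3, -1, 0, 1, 7, 32767};
  for (int16_t x : vals)
    for (int16_t y : vals) {
      // The original node: i32 mul of two sign-extended i16 values. The
      // product is at most 2^30 in magnitude, so there is no overflow.
      int32_t mul = int32_t(x) * int32_t(y);
      // After the fold: pmaddwd on zero-extended inputs. zext keeps the
      // low word identical to sext and zeroes the high word, so each
      // lane reduces to the single signed product.
      assert(mul == pmaddwd_lane(uint16_t(x), uint16_t(y)));
      // With sext inputs the high words would be sign replications
      // (0x0000 or 0xFFFF) and pmaddwd would add a spurious hi*hi
      // product, hence the flip to zext before forming the node.
    }
  return 0;
}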