[llvm] r315825 - [X86][SSE] Don't attempt to reduce the imul vector width of odd-sized vectors (PR34947)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Oct 14 12:57:19 PDT 2017
Author: rksimon
Date: Sat Oct 14 12:57:19 2017
New Revision: 315825
URL: http://llvm.org/viewvc/llvm-project?rev=315825&view=rev
Log:
[X86][SSE] Don't attempt to reduce the imul vector width of odd-sized vectors (PR34947)
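
The early-out below is needed because reduceVMULWidth builds a punpcklwd-style interleave mask in NumElts / 2 pairs (see the loop in the second hunk), so an odd element count, such as the <9 x i32> multiply reduced from PR34947, would leave the final mask lane unmapped. A minimal standalone sketch of that mask construction (hypothetical demo code, not part of LLVM) illustrates the problem:

// Hypothetical standalone demo (not LLVM code): builds the same
// punpcklwd-style interleave mask as reduceVMULWidth to show why an
// odd element count is a problem.
#include <cstdio>
#include <vector>

static std::vector<int> buildInterleaveMask(unsigned NumElts) {
  // -1 stands in for "never written"; in the real code the slot would
  // just keep its default value and silently pick the wrong element.
  std::vector<int> Mask(NumElts, -1);
  for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
    Mask[2 * i] = i;               // element i of the first vector
    Mask[2 * i + 1] = i + NumElts; // element i of the second vector
  }
  return Mask;
}

int main() {
  for (unsigned N : {8u, 9u}) {
    std::printf("NumElts=%u:", N);
    for (int M : buildInterleaveMask(N))
      std::printf(" %d", M);
    // NumElts=8 -> 0 8 1 9 2 10 3 11 (fully specified)
    // NumElts=9 -> 0 9 1 10 2 11 3 12 -1 (last lane never mapped)
    std::printf("\n");
  }
  return 0;
}

For NumElts = 9 the loop runs i = 0..3 and fills lanes 0..7, leaving lane 8 unspecified, so returning SDValue() and letting the generic legalizer handle the odd-sized type is the safe choice.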
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/shrink_vmul.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=315825&r1=315824&r2=315825&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Oct 14 12:57:19 2017
@@ -31476,6 +31476,9 @@ static SDValue reduceVMULWidth(SDNode *N
SDValue N1 = N->getOperand(1);
EVT VT = N->getOperand(0).getValueType();
unsigned NumElts = VT.getVectorNumElements();
+ if ((NumElts % 2) != 0)
+ return SDValue();
+
unsigned RegSize = 128;
MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16);
EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
@@ -31502,7 +31505,7 @@ static SDValue reduceVMULWidth(SDNode *N
// result.
// Generate shuffle functioning as punpcklwd.
SmallVector<int, 16> ShuffleMask(NumElts);
- for (unsigned i = 0, e = NumElts/ 2; i < e; i++) {
+ for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
ShuffleMask[2 * i] = i;
ShuffleMask[2 * i + 1] = i + NumElts;
}
Modified: llvm/trunk/test/CodeGen/X86/shrink_vmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shrink_vmul.ll?rev=315825&r1=315824&r2=315825&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shrink_vmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shrink_vmul.ll Sat Oct 14 12:57:19 2017
@@ -1349,3 +1349,108 @@ entry:
store <2 x i32> %tmp13, <2 x i32>* %tmp15, align 4
ret void
}
+
+;
+; Illegal Types
+;
+
+define void @PR34947() {
+; X86-LABEL: PR34947:
+; X86: # BB#0:
+; X86-NEXT: movdqa (%eax), %xmm0
+; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; X86-NEXT: movd %xmm1, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl %ecx
+; X86-NEXT: movd %edx, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; X86-NEXT: movd %xmm2, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl %ecx
+; X86-NEXT: movd %edx, %xmm2
+; X86-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X86-NEXT: movd %xmm0, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl %ecx
+; X86-NEXT: movd %edx, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X86-NEXT: movd %xmm0, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl %ecx
+; X86-NEXT: movd %edx, %xmm0
+; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl (%eax)
+; X86-NEXT: movd %edx, %xmm0
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
+; X86-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; X86-NEXT: pmuludq %xmm2, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT: pmuludq %xmm2, %xmm3
+; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X86-NEXT: movl $8199, %eax # imm = 0x2007
+; X86-NEXT: movd %eax, %xmm2
+; X86-NEXT: pmuludq %xmm0, %xmm2
+; X86-NEXT: movd %xmm2, (%eax)
+; X86-NEXT: movdqa %xmm1, (%eax)
+; X86-NEXT: retl
+;
+; X64-LABEL: PR34947:
+; X64: # BB#0:
+; X64-NEXT: movdqa (%rax), %xmm0
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; X64-NEXT: movd %xmm1, %ecx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %ecx
+; X64-NEXT: movd %edx, %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; X64-NEXT: movd %xmm2, %ecx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %ecx
+; X64-NEXT: movd %edx, %xmm2
+; X64-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X64-NEXT: movd %xmm0, %ecx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %ecx
+; X64-NEXT: movd %edx, %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; X64-NEXT: movd %xmm0, %ecx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %ecx
+; X64-NEXT: movd %edx, %xmm0
+; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl (%rax)
+; X64-NEXT: movd %edx, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
+; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; X64-NEXT: pmuludq %xmm2, %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X64-NEXT: pmuludq %xmm2, %xmm3
+; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X64-NEXT: movl $8199, %eax # imm = 0x2007
+; X64-NEXT: movd %eax, %xmm2
+; X64-NEXT: pmuludq %xmm0, %xmm2
+; X64-NEXT: movd %xmm2, (%rax)
+; X64-NEXT: movdqa %xmm1, (%rax)
+; X64-NEXT: retq
+ %tmp = load <9 x i32>, <9 x i32>* undef, align 64
+ %rem = urem <9 x i32> zeroinitializer, %tmp
+ %mul = mul <9 x i32> <i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199>, %rem
+ store <9 x i32> %mul, <9 x i32>* undef, align 64
+ ret void
+}
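
The new test is reduced from PR34947: a <9 x i32> urem whose result feeds a multiply by the splat constant 8199. Before this patch, reduceVMULWidth tripped over the odd-width type; with the early-out, the operation legalizes normally, as the CHECK lines above show: the surviving remainder lanes are computed with scalar divl, rebuilt with punpckldq/punpcklqdq, and multiplied via the pmuludq/pshufd sequence, with the odd final lane handled as a scalar movd/pmuludq. No attempt is made to narrow the multiply to i16. (The X86/X64 prefixes correspond to the file's existing i686 and x86_64 SSE2 RUN lines, which are outside this hunk.)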