[llvm] r321490 - [X86] Fix vmul combine for AVX1 targets.

Benjamin Kramer via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 27 05:31:50 PST 2017


Author: d0k
Date: Wed Dec 27 05:31:50 2017
New Revision: 321490

URL: http://llvm.org/viewvc/llvm-project?rev=321490&view=rev
Log:
[X86] Fix vmul combine for AVX1 targets.

v8i32 is legal on AVX1, but AVX1 does not provide a 256-bit pmuludq for it.
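
For context, a minimal IR sketch of the pattern the new guard covers (function
name is illustrative; modeled on the test added below). With only AVX1, the
combine now bails out and leaves such a multiply to the generic lowering
instead of trying to form a 256-bit PMULUDQ that AVX1 cannot select:

  ; zext + multiply-by-constant on a 256-bit i64 vector; without the guard,
  ; combineVMUL would attempt a v8i32 PMULUDQ, which requires AVX2.
  define <4 x i64> @zext_mul_v4i64(<4 x i32> %a) {
    %z = zext <4 x i32> %a to <4 x i64>
    %m = mul nuw nsw <4 x i64> %z, <i64 715827883, i64 715827883, i64 715827883, i64 715827883>
    ret <4 x i64> %m
  }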

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=321490&r1=321489&r2=321490&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Dec 27 05:31:50 2017
@@ -32431,6 +32431,10 @@ static SDValue combineVMUL(SDNode *N, Se
   if (VT.getScalarType() != MVT::i64)
     return SDValue();
 
+  // Don't try to lower 256 bit integer vectors on AVX1 targets.
+  if (!Subtarget.hasAVX2() && VT.getVectorNumElements() > 2)
+    return SDValue();
+
   MVT MulVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
 
   SDValue LHS = N->getOperand(0);

Modified: llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll?rev=321490&r1=321489&r2=321490&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-pmuldq.ll Wed Dec 27 05:31:50 2017
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX --check-prefix=AVX512VL
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefix=AVX --check-prefix=AVX512DQVL
@@ -142,3 +143,46 @@ define <4 x i64> @combine_shuffle_zero_p
   %5 = mul <4 x i64> %3, %4
   ret <4 x i64> %5
 }
+
+define <8 x i64> @combine_zext_pmuludq_256(<8 x i32> %a) {
+; SSE-LABEL: combine_zext_pmuludq_256:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [715827883,715827883]
+; SSE-NEXT:    pmuludq %xmm1, %xmm0
+; SSE-NEXT:    pmuludq %xmm1, %xmm2
+; SSE-NEXT:    pmuludq %xmm1, %xmm4
+; SSE-NEXT:    pmuludq %xmm1, %xmm3
+; SSE-NEXT:    movdqa %xmm4, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: combine_zext_pmuludq_256:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [715827883,715827883,715827883,715827883]
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512VL-LABEL: combine_zext_pmuludq_256:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512VL-NEXT:    vpmuludq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512DQVL-LABEL: combine_zext_pmuludq_256:
+; AVX512DQVL:       # %bb.0:
+; AVX512DQVL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512DQVL-NEXT:    vpmuludq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512DQVL-NEXT:    retq
+  %1 = zext <8 x i32> %a to <8 x i64>
+  %2 = mul nuw nsw <8 x i64> %1, <i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883>
+  ret <8 x i64> %2
+}
