[llvm] r347266 - [X86] Add test case to show missed opportunity to use a single pmuludq to implement a multiply when a zext lives in another basic block.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 19 14:04:13 PST 2018


Author: ctopper
Date: Mon Nov 19 14:04:12 2018
New Revision: 347266

URL: http://llvm.org/viewvc/llvm-project?rev=347266&view=rev
Log:
[X86] Add test case to show missed opportunity to use a single pmuludq to implement a multiply when a zext lives in another basic block.

This can occur when one of the inputs to the multiply is loop invariant, though my test cases just use two basic blocks joined by an unconditional branch, which we won't merge until after isel in the codegen pipeline.
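
Roughly what the loop-shaped case looks like (this sketch is illustrative only and is not part of the committed test; the function name and structure are made up): the zext of the invariant operand sits in the preheader while the multiply and the other zext sit in the loop body.

define void @mul_loop_invariant(<2 x i32> %inv32, <2 x i32>* %in, <2 x i64>* %out, i64 %n) {
entry:
  %inv = zext <2 x i32> %inv32 to <2 x i64>   ; loop-invariant input, extended once outside the loop
  br label %loop

loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %inptr = getelementptr <2 x i32>, <2 x i32>* %in, i64 %i
  %a = load <2 x i32>, <2 x i32>* %inptr
  %b = zext <2 x i32> %a to <2 x i64>         ; the other input, extended inside the loop body
  %m = mul <2 x i64> %inv, %b                 ; isel can no longer see that %inv has zero upper bits
  %outptr = getelementptr <2 x i64>, <2 x i64>* %out, i64 %i
  store <2 x i64> %m, <2 x i64>* %outptr
  %i.next = add i64 %i, 1
  %done = icmp eq i64 %i.next, %n
  br i1 %done, label %exit, label %loop

exit:
  ret void
}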

For scalars, I believe SelectionDAGBuilder can add an AssertZExt to pass this knowledge across basic blocks, but it's explicitly disabled for vectors.
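
For contrast, when both zexts are visible in the same basic block the backend already narrows this to a single pmuludq: both operands are known to have zero upper 32 bits, so the high/low cross products of the generic 64-bit multiply expansion are zero and the one 32x32->64 multiply that pmuludq performs is the entire result. A minimal same-block sketch of that shape (illustrative name, not part of this patch):

define <2 x i64> @mul_v2i64_zext_same_bb(<2 x i32>* %in, <2 x i32>* %y) {
  %a = load <2 x i32>, <2 x i32>* %in
  %b = zext <2 x i32> %a to <2 x i64>
  %c = load <2 x i32>, <2 x i32>* %y
  %d = zext <2 x i32> %c to <2 x i64>
  %e = mul <2 x i64> %b, %d      ; both zexts are visible here, so a single pmuludq suffices
  ret <2 x i64> %e
}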

Modified:
    llvm/trunk/test/CodeGen/X86/vector-mul.ll

Modified: llvm/trunk/test/CodeGen/X86/vector-mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-mul.ll?rev=347266&r1=347265&r2=347266&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-mul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-mul.ll Mon Nov 19 14:04:12 2018
@@ -1240,3 +1240,141 @@ define <2 x i64> @mul_v2i64_60_120(<2 x
   %mul = mul <2 x i64> %x, <i64 60, i64 124>
   ret <2 x i64> %mul
 }
+
+; We unfortunately can't see the zext that lives in the other basic block so we
+; don't know that we only need one pmuludq to compute the full 64 bits. This
+; sort of issue is more likely to occur when there is a loop and one of the
+; multiply inputs is loop invariant.
+; FIXME: We should be able to insert an AssertZExt for this.
+define <2 x i64> @mul_v2i64_zext_cross_bb(<2 x i32>* %in, <2 x i32>* %y) {
+; X86-LABEL: mul_v2i64_zext_cross_bb:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X86-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X86-NEXT:    movdqa %xmm0, %xmm2
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    psrlq $32, %xmm0
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm2, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: mul_v2i64_zext_cross_bb:
+; X64:       # %bb.0:
+; X64-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X64-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X64-NEXT:    movdqa %xmm0, %xmm2
+; X64-NEXT:    pmuludq %xmm1, %xmm2
+; X64-NEXT:    psrlq $32, %xmm0
+; X64-NEXT:    pmuludq %xmm1, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X64-AVX-LABEL: mul_v2i64_zext_cross_bb:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X64-AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT:    retq
+  %a = load <2 x i32>, <2 x i32>* %in
+  %b = zext <2 x i32> %a to <2 x i64>
+  br label %foo
+
+foo:
+  %c = load <2 x i32>, <2 x i32>* %y
+  %d = zext <2 x i32> %c to <2 x i64>
+  %e = mul <2 x i64> %b, %d
+  ret <2 x i64> %e
+}
+
+define <4 x i64> @mul_v4i64_zext_cross_bb(<4 x i32>* %in, <4 x i32>* %y) {
+; X86-LABEL: mul_v4i64_zext_cross_bb:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X86-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X86-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
+; X86-NEXT:    pmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
+; X86-NEXT:    movdqa %xmm0, %xmm4
+; X86-NEXT:    pmuludq %xmm3, %xmm4
+; X86-NEXT:    psrlq $32, %xmm0
+; X86-NEXT:    pmuludq %xmm3, %xmm0
+; X86-NEXT:    psllq $32, %xmm0
+; X86-NEXT:    paddq %xmm4, %xmm0
+; X86-NEXT:    movdqa %xmm1, %xmm3
+; X86-NEXT:    pmuludq %xmm2, %xmm3
+; X86-NEXT:    psrlq $32, %xmm1
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    psllq $32, %xmm1
+; X86-NEXT:    paddq %xmm3, %xmm1
+; X86-NEXT:    retl
+;
+; X64-LABEL: mul_v4i64_zext_cross_bb:
+; X64:       # %bb.0:
+; X64-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X64-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X64-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
+; X64-NEXT:    pmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
+; X64-NEXT:    movdqa %xmm0, %xmm4
+; X64-NEXT:    pmuludq %xmm3, %xmm4
+; X64-NEXT:    psrlq $32, %xmm0
+; X64-NEXT:    pmuludq %xmm3, %xmm0
+; X64-NEXT:    psllq $32, %xmm0
+; X64-NEXT:    paddq %xmm4, %xmm0
+; X64-NEXT:    movdqa %xmm1, %xmm3
+; X64-NEXT:    pmuludq %xmm2, %xmm3
+; X64-NEXT:    psrlq $32, %xmm1
+; X64-NEXT:    pmuludq %xmm2, %xmm1
+; X64-NEXT:    psllq $32, %xmm1
+; X64-NEXT:    paddq %xmm3, %xmm1
+; X64-NEXT:    retq
+;
+; X64-XOP-LABEL: mul_v4i64_zext_cross_bb:
+; X64-XOP:       # %bb.0:
+; X64-XOP-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; X64-XOP-NEXT:    vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X64-XOP-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X64-XOP-NEXT:    vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X64-XOP-NEXT:    vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
+; X64-XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; X64-XOP-NEXT:    vpmuludq %xmm2, %xmm3, %xmm4
+; X64-XOP-NEXT:    vpsrlq $32, %xmm3, %xmm3
+; X64-XOP-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; X64-XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; X64-XOP-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; X64-XOP-NEXT:    vpmuludq %xmm1, %xmm0, %xmm3
+; X64-XOP-NEXT:    vpsrlq $32, %xmm0, %xmm0
+; X64-XOP-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; X64-XOP-NEXT:    vpsllq $32, %xmm0, %xmm0
+; X64-XOP-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; X64-XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-XOP-NEXT:    retq
+;
+; X64-AVX2-LABEL: mul_v4i64_zext_cross_bb:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X64-AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X64-AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm2
+; X64-AVX2-NEXT:    vpsrlq $32, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vpsllq $32, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; X64-AVX2-NEXT:    retq
+  %a = load <4 x i32>, <4 x i32>* %in
+  %b = zext <4 x i32> %a to <4 x i64>
+  br label %foo
+
+foo:
+  %c = load <4 x i32>, <4 x i32>* %y
+  %d = zext <4 x i32> %c to <4 x i64>
+  %e = mul <4 x i64> %b, %d
+  ret <4 x i64> %e
+}