[llvm] r290902 - [X86][SSE] Add extra truncated arithmetic tests for D28219
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 3 11:18:07 PST 2017
Author: rksimon
Date: Tue Jan 3 13:18:07 2017
New Revision: 290902
URL: http://llvm.org/viewvc/llvm-project?rev=290902&view=rev
Log:
[X86][SSE] Add extra truncated arithmetic tests for D28219
Modified:
llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll
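
For context: the tests below exercise sign-extend -> multiply -> add -> truncate
chains. Since the trunc keeps only the low 32 bits of each lane, the wide i64
arithmetic is a candidate for narrowing, presumably the sort of pattern the
review mentioned in the log, D28219 (https://reviews.llvm.org/D28219), targets.
As a minimal sketch, the mul_add_self pattern added here can be reproduced
standalone (function name illustrative only) and fed straight to llc:

  ; Same IR shape as the mul_add_self_v4i64_v4i32 test in the diff below.
  ; Try e.g.: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 sketch.ll
  define <4 x i32> @trunc_mul_add_sketch(<4 x i32> %a0, <4 x i32> %a1) nounwind {
    %w0 = sext <4 x i32> %a0 to <4 x i64>  ; widen both inputs to 64 bits
    %w1 = sext <4 x i32> %a1 to <4 x i64>
    %m = mul <4 x i64> %w0, %w1            ; arithmetic performed in the wide type
    %s = add <4 x i64> %m, %m
    %t = trunc <4 x i64> %s to <4 x i32>   ; only the low 32 bits per lane survive
    ret <4 x i32> %t
  }
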
Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll?rev=290902&r1=290901&r2=290902&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll Tue Jan 3 13:18:07 2017
@@ -5488,8 +5488,8 @@ define <16 x i8> @trunc_or_const_v16i16_
; complex patterns - often created by vectorizer
;
-define <4 x i32> @mul_add_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
-; SSE-LABEL: mul_add_v4i64_v4i32:
+define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; SSE-LABEL: mul_add_const_v4i64_v4i32:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $31, %xmm2
@@ -5530,7 +5530,7 @@ define <4 x i32> @mul_add_v4i64_v4i32(<4
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE-NEXT: retq
;
-; AVX1-LABEL: mul_add_v4i64_v4i32:
+; AVX1-LABEL: mul_add_const_v4i64_v4i32:
; AVX1: # BB#0:
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
@@ -5545,7 +5545,7 @@ define <4 x i32> @mul_add_v4i64_v4i32(<4
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: mul_add_v4i64_v4i32:
+; AVX2-LABEL: mul_add_const_v4i64_v4i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -5557,7 +5557,7 @@ define <4 x i32> @mul_add_v4i64_v4i32(<4
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512F-LABEL: mul_add_v4i64_v4i32:
+; AVX512F-LABEL: mul_add_const_v4i64_v4i32:
; AVX512F: # BB#0:
; AVX512F-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX512F-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -5567,7 +5567,7 @@ define <4 x i32> @mul_add_v4i64_v4i32(<4
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512F-NEXT: retq
;
-; AVX512BW-LABEL: mul_add_v4i64_v4i32:
+; AVX512BW-LABEL: mul_add_const_v4i64_v4i32:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX512BW-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -5577,7 +5577,7 @@ define <4 x i32> @mul_add_v4i64_v4i32(<4
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512BW-NEXT: retq
;
-; AVX512DQ-LABEL: mul_add_v4i64_v4i32:
+; AVX512DQ-LABEL: mul_add_const_v4i64_v4i32:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX512DQ-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -5593,3 +5593,217 @@ define <4 x i32> @mul_add_v4i64_v4i32(<4
%5 = trunc <4 x i64> %4 to <4 x i32>
ret <4 x i32> %5
}
+
+define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; SSE-LABEL: mul_add_self_v4i64_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: psrad $31, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: psrad $31, %xmm4
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: psrlq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm3, %xmm5
+; SSE-NEXT: paddq %xmm4, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm3, %xmm2
+; SSE-NEXT: paddq %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrlq $32, %xmm3
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm0, %xmm4
+; SSE-NEXT: paddq %xmm3, %xmm4
+; SSE-NEXT: psllq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm0, %xmm1
+; SSE-NEXT: paddq %xmm4, %xmm1
+; SSE-NEXT: paddq %xmm1, %xmm1
+; SSE-NEXT: paddq %xmm2, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: mul_add_self_v4i64_v4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmuldq %xmm3, %xmm2, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: mul_add_self_v4i64_v4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm0, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: mul_add_self_v4i64_v4i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512F-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512F-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddq %ymm0, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: mul_add_self_v4i64_v4i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512BW-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512BW-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpaddq %ymm0, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: mul_add_self_v4i64_v4i32:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512DQ-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpaddq %ymm0, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: retq
+ %1 = sext <4 x i32> %a0 to <4 x i64>
+ %2 = sext <4 x i32> %a1 to <4 x i64>
+ %3 = mul <4 x i64> %1, %2
+ %4 = add <4 x i64> %3, %3
+ %5 = trunc <4 x i64> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+
+define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; SSE-LABEL: mul_add_multiuse_v4i64_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm4, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: psrlq $32, %xmm2
+; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: pmuludq %xmm4, %xmm5
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm3, %xmm4
+; SSE-NEXT: paddq %xmm2, %xmm4
+; SSE-NEXT: psllq $32, %xmm4
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: psrlq $32, %xmm6
+; SSE-NEXT: pmuludq %xmm1, %xmm6
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm0, %xmm1
+; SSE-NEXT: paddq %xmm6, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm0, %xmm1
+; SSE-NEXT: paddq %xmm1, %xmm2
+; SSE-NEXT: paddq %xmm3, %xmm4
+; SSE-NEXT: paddq %xmm5, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: mul_add_multiuse_v4i64_v4i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpmuldq %xmm3, %xmm2, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: mul_add_multiuse_v4i64_v4i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: mul_add_multiuse_v4i64_v4i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512F-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512F-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: mul_add_multiuse_v4i64_v4i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512BW-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512BW-NEXT: vpmuldq %ymm1, %ymm0, %ymm1
+; AVX512BW-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: mul_add_multiuse_v4i64_v4i32:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX512DQ-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm1
+; AVX512DQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: retq
+ %1 = sext <4 x i32> %a0 to <4 x i64>
+ %2 = sext <4 x i32> %a1 to <4 x i64>
+ %3 = mul <4 x i64> %1, %2
+ %4 = add <4 x i64> %1, %3
+ %5 = trunc <4 x i64> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
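
Aside: the {{.*#+}} shuffle-decode comments above are characteristic of
utils/update_llc_test_checks.py, so these CHECK lines can presumably be
regenerated rather than hand-edited. Assuming a built llc is on PATH, running
something like the following from the llvm source root should do it:

  python utils/update_llc_test_checks.py test/CodeGen/X86/vector-trunc-math.ll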