[llvm] r300740 - Update the madd.ll test with utils/update_llc_test_checks.py (NFC)
Dehao Chen via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 19 13:08:14 PDT 2017
Author: dehao
Date: Wed Apr 19 15:08:14 2017
New Revision: 300740
URL: http://llvm.org/viewvc/llvm-project?rev=300740&view=rev
Log:
Update the madd.ll test with utils/update_llc_test_checks.py (NFC)
Modified:
llvm/trunk/test/CodeGen/X86/madd.ll
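For reference, autogenerated checks like the ones in the diff below are produced by running the script over the test file against a locally built llc; a typical invocation from the llvm source root looks like the following (the --llc-binary build path is illustrative, not part of this commit):

  python utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      test/CodeGen/X86/madd.ll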
Modified: llvm/trunk/test/CodeGen/X86/madd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/madd.ll?rev=300740&r1=300739&r2=300740&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/madd.ll (original)
+++ llvm/trunk/test/CodeGen/X86/madd.ll Wed Apr 19 15:08:14 2017
@@ -1,30 +1,86 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512
-;SSE2-LABEL: @_Z10test_shortPsS_i
-;SSE2: movdqu
-;SSE2-NEXT: movdqu
-;SSE2-NEXT: pmaddwd
-;SSE2-NEXT: paddd
-;SSE2: ret
-
-;AVX2-LABEL: @_Z10test_shortPsS_i
-;AVX2: vmovdqu
-;AVX2-NEXT: vpmaddwd
-;AVX2-NEXT: vinserti128
-;AVX2-NEXT: vpaddd
-;AVX2: ret
-
-;AVX512-LABEL: @_Z10test_shortPsS_i
-;AVX512: vmovdqu
-;AVX512-NEXT: vpmaddwd
-;AVX512-NEXT: vinserti128
-;AVX512-NEXT: vpaddd
-;AVX512: ret
-
define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+; SSE2-LABEL: _Z10test_shortPsS_i:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movl %edx, %eax
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: .p2align 4, 0x90
+; SSE2-NEXT: .LBB0_1: # %vector.body
+; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
+; SSE2-NEXT: movdqu (%rdi), %xmm2
+; SSE2-NEXT: movdqu (%rsi), %xmm3
+; SSE2-NEXT: pmaddwd %xmm2, %xmm3
+; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: addq $16, %rsi
+; SSE2-NEXT: addq $16, %rdi
+; SSE2-NEXT: addq $-8, %rax
+; SSE2-NEXT: jne .LBB0_1
+; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: _Z10test_shortPsS_i:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: movl %edx, %eax
+; AVX2-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: .p2align 4, 0x90
+; AVX2-NEXT: .LBB0_1: # %vector.body
+; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX2-NEXT: vmovdqu (%rsi), %xmm2
+; AVX2-NEXT: vpmaddwd (%rdi), %xmm2, %xmm2
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: addq $16, %rsi
+; AVX2-NEXT: addq $16, %rdi
+; AVX2-NEXT: addq $-8, %rax
+; AVX2-NEXT: jne .LBB0_1
+; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: _Z10test_shortPsS_i:
+; AVX512: # BB#0: # %entry
+; AVX512-NEXT: movl %edx, %eax
+; AVX512-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: .p2align 4, 0x90
+; AVX512-NEXT: .LBB0_1: # %vector.body
+; AVX512-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX512-NEXT: vmovdqu (%rsi), %xmm2
+; AVX512-NEXT: vpmaddwd (%rdi), %xmm2, %xmm2
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm2
+; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: addq $16, %rsi
+; AVX512-NEXT: addq $16, %rdi
+; AVX512-NEXT: addq $-8, %rax
+; AVX512-NEXT: jne .LBB0_1
+; AVX512-NEXT: # BB#2: # %middle.block
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
entry:
%3 = zext i32 %2 to i64
br label %vector.body
@@ -57,19 +113,87 @@ middle.block:
ret i32 %13
}
-;SSE2-LABEL: @test_unsigned_short
-;SSE2-NOT: pmaddwd
-;SSE2: ret
-
-;AVX2-LABEL: @test_unsigned_short
-;AVX2-NOT: vpmaddwd
-;AVX2: ret
-
-;AVX512-LABEL: @test_unsigned_short
-;AVX512-NOT: vpmaddwd
-;AVX512: ret
-
define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly, i32) local_unnamed_addr #0 {
+; SSE2-LABEL: test_unsigned_short:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movl %edx, %eax
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: .p2align 4, 0x90
+; SSE2-NEXT: .LBB1_1: # %vector.body
+; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
+; SSE2-NEXT: movdqu (%rdi), %xmm2
+; SSE2-NEXT: movdqu (%rsi), %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pmulhuw %xmm2, %xmm4
+; SSE2-NEXT: pmullw %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: addq $16, %rsi
+; SSE2-NEXT: addq $16, %rdi
+; SSE2-NEXT: addq $-8, %rax
+; SSE2-NEXT: jne .LBB1_1
+; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: test_unsigned_short:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: movl %edx, %eax
+; AVX2-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; AVX2-NEXT: .p2align 4, 0x90
+; AVX2-NEXT: .LBB1_1: # %vector.body
+; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmulld %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: addq $16, %rsi
+; AVX2-NEXT: addq $16, %rdi
+; AVX2-NEXT: addq $-8, %rax
+; AVX2-NEXT: jne .LBB1_1
+; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_unsigned_short:
+; AVX512: # BB#0: # %entry
+; AVX512-NEXT: movl %edx, %eax
+; AVX512-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: .p2align 4, 0x90
+; AVX512-NEXT: .LBB1_1: # %vector.body
+; AVX512-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX512-NEXT: vpmulld %ymm1, %ymm2, %ymm1
+; AVX512-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: addq $16, %rsi
+; AVX512-NEXT: addq $16, %rdi
+; AVX512-NEXT: addq $-8, %rax
+; AVX512-NEXT: jne .LBB1_1
+; AVX512-NEXT: # BB#2: # %middle.block
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
entry:
%3 = zext i32 %2 to i64
br label %vector.body
@@ -102,22 +226,114 @@ middle.block:
ret i32 %13
}
-;AVX2-LABEL: @_Z9test_charPcS_i
-;AVX2: vpmovsxbw
-;AVX2-NEXT: vpmovsxbw
-;AVX2-NEXT: vpmaddwd
-;AVX2-NEXT: vpaddd
-;AVX2: ret
-
-;AVX512-LABEL: @_Z9test_charPcS_i
-;AVX512: vpmovsxbw
-;AVX512-NEXT: vpmovsxbw
-;AVX512-NEXT: vpmaddwd
-;AVX512-NEXT: vinserti64x4
-;AVX512-NEXT: vpaddd
-;AVX512: ret
-
define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i32) local_unnamed_addr #0 {
+; SSE2-LABEL: _Z9test_charPcS_i:
+; SSE2: # BB#0: # %entry
+; SSE2-NEXT: movl %edx, %eax
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: .p2align 4, 0x90
+; SSE2-NEXT: .LBB2_1: # %vector.body
+; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
+; SSE2-NEXT: movq {{.*#+}} xmm4 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm4
+; SSE2-NEXT: movq {{.*#+}} xmm5 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm5
+; SSE2-NEXT: pmullw %xmm4, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: movq {{.*#+}} xmm6 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: movq {{.*#+}} xmm7 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm7
+; SSE2-NEXT: pmullw %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: paddd %xmm7, %xmm2
+; SSE2-NEXT: paddd %xmm6, %xmm3
+; SSE2-NEXT: paddd %xmm5, %xmm1
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: addq $16, %rsi
+; SSE2-NEXT: addq $16, %rdi
+; SSE2-NEXT: addq $-16, %rax
+; SSE2-NEXT: jne .LBB2_1
+; SSE2-NEXT: # BB#2: # %middle.block
+; SSE2-NEXT: paddd %xmm3, %xmm0
+; SSE2-NEXT: paddd %xmm2, %xmm1
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: retq
+;
+; AVX2-LABEL: _Z9test_charPcS_i:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: movl %edx, %eax
+; AVX2-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: .p2align 4, 0x90
+; AVX2-NEXT: .LBB2_1: # %vector.body
+; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX2-NEXT: vpmovsxbw (%rdi), %ymm2
+; AVX2-NEXT: vpmovsxbw (%rsi), %ymm3
+; AVX2-NEXT: vpmaddwd %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: addq $16, %rsi
+; AVX2-NEXT: addq $16, %rdi
+; AVX2-NEXT: addq $-16, %rax
+; AVX2-NEXT: jne .LBB2_1
+; AVX2-NEXT: # BB#2: # %middle.block
+; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: _Z9test_charPcS_i:
+; AVX512: # BB#0: # %entry
+; AVX512-NEXT: movl %edx, %eax
+; AVX512-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; AVX512-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX512-NEXT: .p2align 4, 0x90
+; AVX512-NEXT: .LBB2_1: # %vector.body
+; AVX512-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX512-NEXT: vpmovsxbw (%rdi), %ymm2
+; AVX512-NEXT: vpmovsxbw (%rsi), %ymm3
+; AVX512-NEXT: vpmaddwd %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm2
+; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: addq $16, %rsi
+; AVX512-NEXT: addq $16, %rdi
+; AVX512-NEXT: addq $-16, %rax
+; AVX512-NEXT: jne .LBB2_1
+; AVX512-NEXT: # BB#2: # %middle.block
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm0[4,5,6,7,0,1,0,1]
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm0[2,3,0,1,0,1,0,1]
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpshufd {{.*#+}} zmm1 = zmm0[1,1,2,3,5,5,6,7,9,9,10,11,13,13,14,15]
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
entry:
%3 = zext i32 %2 to i64
br label %vector.body