[llvm] r265172 - [x86] add an SSE2 + fast-unaligned accesses run for memset nonzero tests

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 1 11:29:25 PDT 2016


Author: spatel
Date: Fri Apr  1 13:29:25 2016
New Revision: 265172

URL: http://llvm.org/viewvc/llvm-project?rev=265172&view=rev
Log:
[x86] add an SSE2 + fast-unaligned accesses run for memset nonzero tests

Was there really no other way to splat a byte in SSE2?
    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
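
For context, that three-shuffle sequence is the standard SSE2 byte splat: pshufb
(which can splat a byte with an all-zero shuffle control) only arrives with SSSE3,
and the AVX check lines further down use exactly that vpxor + vpshufb pattern. A
minimal C sketch of the operation being lowered; the splat_byte name is just for
illustration:

    #include <emmintrin.h>

    /* Broadcast one byte into all 16 lanes of an XMM register.
       With only SSE2 available (no SSSE3 pshufb), codegen expands
       _mm_set1_epi8 into the punpcklbw + pshuflw + pshufd sequence
       shown above. */
    __m128i splat_byte(unsigned char b) {
      return _mm_set1_epi8((char)b);
    }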


Modified:
    llvm/trunk/test/CodeGen/X86/memset-nonzero.ll

Modified: llvm/trunk/test/CodeGen/X86/memset-nonzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memset-nonzero.ll?rev=265172&r1=265171&r2=265172&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memset-nonzero.ll (original)
+++ llvm/trunk/test/CodeGen/X86/memset-nonzero.ll Fri Apr  1 13:29:25 2016
@@ -1,8 +1,9 @@
 ; NOTE: Assertions have been autogenerated by update_test_checks.py
-; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=sse | FileCheck %s --check-prefix=ANY --check-prefix=SSE --check-prefix=SSE1
-; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=sse2 | FileCheck %s --check-prefix=ANY --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=avx | FileCheck %s --check-prefix=ANY --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=avx2 | FileCheck %s --check-prefix=ANY --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=sse | FileCheck %s --check-prefix=SSE
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=sse2 | FileCheck %s --check-prefix=SSE
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=sse2,-slow-unaligned-mem-16 | FileCheck %s --check-prefix=SSE2FAST
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc -mtriple=x86_64-unknown-unknown < %s -mattr=avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 
 ; https://llvm.org/bugs/show_bug.cgi?id=27100
 
@@ -13,6 +14,11 @@ define void @memset_16_nonzero_bytes(i8*
 ; SSE-NEXT:    movq %rax, (%rdi)
 ; SSE-NEXT:    retq
 ;
+; SSE2FAST-LABEL: memset_16_nonzero_bytes:
+; SSE2FAST:         movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
+; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX-LABEL: memset_16_nonzero_bytes:
 ; AVX:         vmovaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %xmm0, (%rdi)
@@ -31,6 +37,12 @@ define void @memset_32_nonzero_bytes(i8*
 ; SSE-NEXT:    movq %rax, (%rdi)
 ; SSE-NEXT:    retq
 ;
+; SSE2FAST-LABEL: memset_32_nonzero_bytes:
+; SSE2FAST:         movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
+; SSE2FAST-NEXT:    movups %xmm0, 16(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX-LABEL: memset_32_nonzero_bytes:
 ; AVX:         vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %ymm0, (%rdi)
@@ -54,6 +66,14 @@ define void @memset_64_nonzero_bytes(i8*
 ; SSE-NEXT:    movq %rax, (%rdi)
 ; SSE-NEXT:    retq
 ;
+; SSE2FAST-LABEL: memset_64_nonzero_bytes:
+; SSE2FAST:         movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
+; SSE2FAST-NEXT:    movups %xmm0, 48(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 32(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 16(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX-LABEL: memset_64_nonzero_bytes:
 ; AVX:         vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %ymm0, 32(%rdi)
@@ -86,6 +106,18 @@ define void @memset_128_nonzero_bytes(i8
 ; SSE-NEXT:    movq %rax, (%rdi)
 ; SSE-NEXT:    retq
 ;
+; SSE2FAST-LABEL: memset_128_nonzero_bytes:
+; SSE2FAST:         movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
+; SSE2FAST-NEXT:    movups %xmm0, 112(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 96(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 80(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 64(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 48(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 32(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 16(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX-LABEL: memset_128_nonzero_bytes:
 ; AVX:         vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %ymm0, 96(%rdi)
@@ -110,6 +142,26 @@ define void @memset_256_nonzero_bytes(i8
 ; SSE-NEXT:    popq %rax
 ; SSE-NEXT:    retq
 ;
+; SSE2FAST-LABEL: memset_256_nonzero_bytes:
+; SSE2FAST:         movaps {{.*#+}} xmm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
+; SSE2FAST-NEXT:    movups %xmm0, 240(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 224(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 208(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 192(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 176(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 160(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 144(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 128(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 112(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 96(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 80(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 64(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 48(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 32(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, 16(%rdi)
+; SSE2FAST-NEXT:    movups %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX-LABEL: memset_256_nonzero_bytes:
 ; AVX:         vmovaps {{.*#+}} ymm0 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
 ; AVX-NEXT:    vmovups %ymm0, 224(%rdi)
@@ -140,6 +192,14 @@ define void @memset_16_nonconst_bytes(i8
 ; SSE-NEXT:    movq %rcx, (%rdi)
 ; SSE-NEXT:    retq
 ;
+; SSE2FAST-LABEL: memset_16_nonconst_bytes:
+; SSE2FAST:         movd %esi, %xmm0
+; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2FAST-NEXT:    movdqu %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX1-LABEL: memset_16_nonconst_bytes:
 ; AVX1:         vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -168,6 +228,15 @@ define void @memset_32_nonconst_bytes(i8
 ; SSE-NEXT:    movq %rcx, (%rdi)
 ; SSE-NEXT:    retq
 ;
+; SSE2FAST-LABEL: memset_32_nonconst_bytes:
+; SSE2FAST:         movd %esi, %xmm0
+; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2FAST-NEXT:    movdqu %xmm0, 16(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX1-LABEL: memset_32_nonconst_bytes:
 ; AVX1:         vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -203,6 +272,17 @@ define void @memset_64_nonconst_bytes(i8
 ; SSE-NEXT:    movq %rcx, (%rdi)
 ; SSE-NEXT:    retq
 ;
+; SSE2FAST-LABEL: memset_64_nonconst_bytes:
+; SSE2FAST:         movd %esi, %xmm0
+; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2FAST-NEXT:    movdqu %xmm0, 48(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 32(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 16(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX1-LABEL: memset_64_nonconst_bytes:
 ; AVX1:         vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -248,6 +328,21 @@ define void @memset_128_nonconst_bytes(i
 ; SSE-NEXT:    movq %rcx, (%rdi)
 ; SSE-NEXT:    retq
 ;
+; SSE2FAST-LABEL: memset_128_nonconst_bytes:
+; SSE2FAST:         movd %esi, %xmm0
+; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2FAST-NEXT:    movdqu %xmm0, 112(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 96(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 80(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 64(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 48(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 32(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 16(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX1-LABEL: memset_128_nonconst_bytes:
 ; AVX1:         vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
@@ -279,6 +374,29 @@ define void @memset_256_nonconst_bytes(i
 ; SSE:         movl $256, %edx # imm = 0x100
 ; SSE-NEXT:    jmp memset # TAILCALL
 ;
+; SSE2FAST-LABEL: memset_256_nonconst_bytes:
+; SSE2FAST:         movd %esi, %xmm0
+; SSE2FAST-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2FAST-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2FAST-NEXT:    movdqu %xmm0, 240(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 224(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 208(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 192(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 176(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 160(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 144(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 128(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 112(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 96(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 80(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 64(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 48(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 32(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, 16(%rdi)
+; SSE2FAST-NEXT:    movdqu %xmm0, (%rdi)
+; SSE2FAST-NEXT:    retq
+;
 ; AVX1-LABEL: memset_256_nonconst_bytes:
 ; AVX1:         vmovd %esi, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
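
The diff shows only the added CHECK lines; the functions under test lower
fixed-size memsets of a nonzero byte. A minimal C sketch of the equivalent
source (names mirror the test functions; the exact IR in the .ll file is not
reproduced here):

    #include <string.h>

    /* Constant fill byte (42): the backend expands these into the
       vector stores checked above rather than calling libc memset. */
    void memset_16_nonzero_bytes(char *x) { memset(x, 42, 16); }

    /* Non-constant fill byte: the splat of %esi is the punpcklbw +
       pshuflw + pshufd sequence from the commit log. */
    void memset_16_nonconst_bytes(char *x, char c) { memset(x, c, 16); }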
