[llvm] r315186 - [X86][SSE] Add test case for PR27708

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sun Oct 8 12:18:10 PDT 2017


Author: rksimon
Date: Sun Oct  8 12:18:10 2017
New Revision: 315186

URL: http://llvm.org/viewvc/llvm-project?rev=315186&view=rev
Log:
[X86][SSE] Add test case for PR27708

Added:
    llvm/trunk/test/CodeGen/X86/widen_load-3.ll

Added: llvm/trunk/test/CodeGen/X86/widen_load-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_load-3.ll?rev=315186&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_load-3.ll (added)
+++ llvm/trunk/test/CodeGen/X86/widen_load-3.ll Sun Oct  8 12:18:10 2017
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE
+; RUN: llc < %s -mtriple=i686-linux -mattr=+avx    | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX1
+; RUN: llc < %s -mtriple=i686-linux -mattr=+avx2   | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX2
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx    | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx2   | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX2
+
+; PR27708
+
+define <7 x i64> @load7_aligned(<7 x i64>* %x) {
+; X86-SSE-LABEL: load7_aligned:
+; X86-SSE:       # BB#0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movaps (%ecx), %xmm0
+; X86-SSE-NEXT:    movaps 16(%ecx), %xmm1
+; X86-SSE-NEXT:    movaps 32(%ecx), %xmm2
+; X86-SSE-NEXT:    movl 48(%ecx), %edx
+; X86-SSE-NEXT:    movl 52(%ecx), %ecx
+; X86-SSE-NEXT:    movl %ecx, 52(%eax)
+; X86-SSE-NEXT:    movl %edx, 48(%eax)
+; X86-SSE-NEXT:    movaps %xmm2, 32(%eax)
+; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl $4
+;
+; X86-AVX1-LABEL: load7_aligned:
+; X86-AVX1:       # BB#0:
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX1-NEXT:    vmovaps (%ecx), %ymm0
+; X86-AVX1-NEXT:    vmovaps 32(%ecx), %ymm1
+; X86-AVX1-NEXT:    vmovaps %ymm0, (%eax)
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm0
+; X86-AVX1-NEXT:    vpextrd $1, %xmm0, 52(%eax)
+; X86-AVX1-NEXT:    vmovd %xmm0, 48(%eax)
+; X86-AVX1-NEXT:    vmovaps %xmm1, 32(%eax)
+; X86-AVX1-NEXT:    vzeroupper
+; X86-AVX1-NEXT:    retl $4
+;
+; X86-AVX2-LABEL: load7_aligned:
+; X86-AVX2:       # BB#0:
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX2-NEXT:    vmovaps (%ecx), %ymm0
+; X86-AVX2-NEXT:    vmovdqa 32(%ecx), %ymm1
+; X86-AVX2-NEXT:    vmovaps %ymm0, (%eax)
+; X86-AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; X86-AVX2-NEXT:    vpextrd $1, %xmm0, 52(%eax)
+; X86-AVX2-NEXT:    vmovd %xmm0, 48(%eax)
+; X86-AVX2-NEXT:    vmovdqa %xmm1, 32(%eax)
+; X86-AVX2-NEXT:    vzeroupper
+; X86-AVX2-NEXT:    retl $4
+;
+; X64-SSE-LABEL: load7_aligned:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    movaps (%rsi), %xmm0
+; X64-SSE-NEXT:    movaps 16(%rsi), %xmm1
+; X64-SSE-NEXT:    movaps 32(%rsi), %xmm2
+; X64-SSE-NEXT:    movq 48(%rsi), %rax
+; X64-SSE-NEXT:    movq %rax, 48(%rdi)
+; X64-SSE-NEXT:    movaps %xmm2, 32(%rdi)
+; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    movq %rdi, %rax
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: load7_aligned:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovaps (%rsi), %ymm0
+; X64-AVX-NEXT:    vmovaps 32(%rsi), %ymm1
+; X64-AVX-NEXT:    vmovaps %ymm0, (%rdi)
+; X64-AVX-NEXT:    vextractf128 $1, %ymm1, %xmm0
+; X64-AVX-NEXT:    vmovlps %xmm0, 48(%rdi)
+; X64-AVX-NEXT:    vmovaps %xmm1, 32(%rdi)
+; X64-AVX-NEXT:    movq %rdi, %rax
+; X64-AVX-NEXT:    vzeroupper
+; X64-AVX-NEXT:    retq
+  %x1 = load <7 x i64>, <7 x i64>* %x
+  ret <7 x i64> %x1
+}
+
+define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
+; X86-SSE-LABEL: load7_unaligned:
+; X86-SSE:       # BB#0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movups (%ecx), %xmm0
+; X86-SSE-NEXT:    movups 16(%ecx), %xmm1
+; X86-SSE-NEXT:    movups 32(%ecx), %xmm2
+; X86-SSE-NEXT:    movl 48(%ecx), %edx
+; X86-SSE-NEXT:    movl 52(%ecx), %ecx
+; X86-SSE-NEXT:    movl %ecx, 52(%eax)
+; X86-SSE-NEXT:    movl %edx, 48(%eax)
+; X86-SSE-NEXT:    movaps %xmm2, 32(%eax)
+; X86-SSE-NEXT:    movaps %xmm1, 16(%eax)
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl $4
+;
+; X86-AVX-LABEL: load7_unaligned:
+; X86-AVX:       # BB#0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    vmovups (%ecx), %ymm0
+; X86-AVX-NEXT:    vmovups 32(%ecx), %xmm1
+; X86-AVX-NEXT:    movl 48(%ecx), %edx
+; X86-AVX-NEXT:    movl 52(%ecx), %ecx
+; X86-AVX-NEXT:    movl %ecx, 52(%eax)
+; X86-AVX-NEXT:    movl %edx, 48(%eax)
+; X86-AVX-NEXT:    vmovaps %xmm1, 32(%eax)
+; X86-AVX-NEXT:    vmovaps %ymm0, (%eax)
+; X86-AVX-NEXT:    vzeroupper
+; X86-AVX-NEXT:    retl $4
+;
+; X64-SSE-LABEL: load7_unaligned:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    movups (%rsi), %xmm0
+; X64-SSE-NEXT:    movups 16(%rsi), %xmm1
+; X64-SSE-NEXT:    movups 32(%rsi), %xmm2
+; X64-SSE-NEXT:    movq 48(%rsi), %rax
+; X64-SSE-NEXT:    movq %rax, 48(%rdi)
+; X64-SSE-NEXT:    movaps %xmm2, 32(%rdi)
+; X64-SSE-NEXT:    movaps %xmm1, 16(%rdi)
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    movq %rdi, %rax
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: load7_unaligned:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovups (%rsi), %ymm0
+; X64-AVX-NEXT:    vmovups 32(%rsi), %xmm1
+; X64-AVX-NEXT:    movq 48(%rsi), %rax
+; X64-AVX-NEXT:    movq %rax, 48(%rdi)
+; X64-AVX-NEXT:    vmovaps %xmm1, 32(%rdi)
+; X64-AVX-NEXT:    vmovaps %ymm0, (%rdi)
+; X64-AVX-NEXT:    movq %rdi, %rax
+; X64-AVX-NEXT:    vzeroupper
+; X64-AVX-NEXT:    retq
+  %x1 = load <7 x i64>, <7 x i64>* %x, align 1
+  ret <7 x i64> %x1
+}
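
As noted at the top of the added file, the CHECK lines above were autogenerated by utils/update_llc_test_checks.py. If codegen for these cases changes, the assertions can be regenerated by rerunning that script on the test file; a minimal sketch of the invocation, assuming it is run from the llvm source root with a built llc on PATH (the exact flags may vary by tree):

    python utils/update_llc_test_checks.py test/CodeGen/X86/widen_load-3.ll

The script reruns each RUN line above and rewrites the per-prefix (X86-SSE, X86-AVX1, X64-AVX, ...) CHECK blocks in place.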