[llvm] r318014 - [X86] Regenerate fold-load-unops.ll and add an avx512f command line.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 12 21:25:21 PST 2017


Author: ctopper
Date: Sun Nov 12 21:25:21 2017
New Revision: 318014

URL: http://llvm.org/viewvc/llvm-project?rev=318014&view=rev
Log:
[X86] Regenerate fold-load-unops.ll and add an avx512f command line.
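
The CHECK lines below were autogenerated by the script named in the test's
NOTE line. A typical invocation, run from the llvm source root with a built
llc on PATH (the exact working directory and flags are an assumption, not
taken from this commit), looks like:

    $ utils/update_llc_test_checks.py test/CodeGen/X86/fold-load-unops.ll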

Modified:
    llvm/trunk/test/CodeGen/X86/fold-load-unops.ll

Modified: llvm/trunk/test/CodeGen/X86/fold-load-unops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-load-unops.ll?rev=318014&r1=318013&r2=318014&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-load-unops.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-load-unops.ll Sun Nov 12 21:25:21 2017
@@ -1,19 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx512f < %s | FileCheck %s --check-prefix=AVX
 
 ; Verify we fold loads into unary sse intrinsics only when optimizing for size
 
 define float @rcpss(float* %a) {
 ; SSE-LABEL: rcpss:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movss (%rdi), %xmm0
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    rcpss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rcpss:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovss (%rdi), %xmm0
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load float, float* %a
@@ -26,13 +27,13 @@ define float @rcpss(float* %a) {
 define float @rsqrtss(float* %a) {
 ; SSE-LABEL: rsqrtss:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movss (%rdi), %xmm0
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    rsqrtss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: rsqrtss:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovss (%rdi), %xmm0
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load float, float* %a
@@ -45,13 +46,13 @@ define float @rsqrtss(float* %a) {
 define float @sqrtss(float* %a) {
 ; SSE-LABEL: sqrtss:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movss (%rdi), %xmm0
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE-NEXT:    sqrtss %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtss:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovss (%rdi), %xmm0
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load float, float* %a
@@ -64,13 +65,13 @@ define float @sqrtss(float* %a) {
 define double @sqrtsd(double* %a) {
 ; SSE-LABEL: sqrtsd:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movsd (%rdi), %xmm0
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE-NEXT:    sqrtsd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sqrtsd:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovsd (%rdi), %xmm0
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
     %ld = load double, double* %a
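
For context on the comment at the top of the test: the load is only folded
into the unary op when the function is optimizing for size, which is why the
functions above (no optsize) still show a separate movss/movsd. A minimal
sketch of the optsize shape such a test case takes; the function name and
exact body here are illustrative assumptions, not lines from this diff:

    declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>)

    ; With optsize, X86 is expected to fold the load into the intrinsic,
    ; e.g. "rcpss (%rdi), %xmm0" instead of a movss followed by rcpss.
    define float @rcpss_size(float* %a) optsize {
      %ld = load float, float* %a
      %ins = insertelement <4 x float> undef, float %ld, i32 0
      %res = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %ins)
      %ext = extractelement <4 x float> %res, i32 0
      ret float %ext
    }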