[llvm] r356787 - [X86] Regenerate powi tests to include i686 x87/sse targets
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 22 11:04:29 PDT 2019
Author: rksimon
Date: Fri Mar 22 11:04:28 2019
New Revision: 356787
URL: http://llvm.org/viewvc/llvm-project?rev=356787&view=rev
Log:
[X86] Regenerate powi tests to include i686 x87/sse targets
Modified:
llvm/trunk/test/CodeGen/X86/powi.ll
Modified: llvm/trunk/test/CodeGen/X86/powi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/powi.ll?rev=356787&r1=356786&r2=356787&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/powi.ll (original)
+++ llvm/trunk/test/CodeGen/X86/powi.ll Fri Mar 22 11:04:28 2019
@@ -1,37 +1,123 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=CHECK,X86,X86-X87
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86,X86-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
+
; Ideally this would compile to 5 multiplies.
define double @pow_wrapper(double %a) nounwind readonly ssp noredzone {
-; CHECK-LABEL: pow_wrapper:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movapd %xmm0, %xmm1
-; CHECK-NEXT: mulsd %xmm0, %xmm1
-; CHECK-NEXT: mulsd %xmm1, %xmm0
-; CHECK-NEXT: mulsd %xmm1, %xmm1
-; CHECK-NEXT: mulsd %xmm1, %xmm0
-; CHECK-NEXT: mulsd %xmm1, %xmm1
-; CHECK-NEXT: mulsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: retq
+; X86-X87-LABEL: pow_wrapper:
+; X86-X87: # %bb.0:
+; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-X87-NEXT: fld %st(0)
+; X86-X87-NEXT: fmul %st(1), %st
+; X86-X87-NEXT: fmul %st, %st(1)
+; X86-X87-NEXT: fmul %st, %st(0)
+; X86-X87-NEXT: fmul %st, %st(1)
+; X86-X87-NEXT: fmul %st, %st(0)
+; X86-X87-NEXT: fmulp %st, %st(1)
+; X86-X87-NEXT: retl
+;
+; X86-SSE-LABEL: pow_wrapper:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pushl %ebp
+; X86-SSE-NEXT: movl %esp, %ebp
+; X86-SSE-NEXT: andl $-8, %esp
+; X86-SSE-NEXT: subl $8, %esp
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movapd %xmm0, %xmm1
+; X86-SSE-NEXT: mulsd %xmm0, %xmm1
+; X86-SSE-NEXT: mulsd %xmm1, %xmm0
+; X86-SSE-NEXT: mulsd %xmm1, %xmm1
+; X86-SSE-NEXT: mulsd %xmm1, %xmm0
+; X86-SSE-NEXT: mulsd %xmm1, %xmm1
+; X86-SSE-NEXT: mulsd %xmm0, %xmm1
+; X86-SSE-NEXT: movsd %xmm1, (%esp)
+; X86-SSE-NEXT: fldl (%esp)
+; X86-SSE-NEXT: movl %ebp, %esp
+; X86-SSE-NEXT: popl %ebp
+; X86-SSE-NEXT: retl
+;
+; X64-LABEL: pow_wrapper:
+; X64: # %bb.0:
+; X64-NEXT: movapd %xmm0, %xmm1
+; X64-NEXT: mulsd %xmm0, %xmm1
+; X64-NEXT: mulsd %xmm1, %xmm0
+; X64-NEXT: mulsd %xmm1, %xmm1
+; X64-NEXT: mulsd %xmm1, %xmm0
+; X64-NEXT: mulsd %xmm1, %xmm1
+; X64-NEXT: mulsd %xmm0, %xmm1
+; X64-NEXT: movapd %xmm1, %xmm0
+; X64-NEXT: retq
%ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
ret double %ret
}
define double @pow_wrapper_optsize(double %a) optsize {
-; CHECK-LABEL: pow_wrapper_optsize:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl $15, %edi
-; CHECK-NEXT: jmp
+; X86-X87-LABEL: pow_wrapper_optsize:
+; X86-X87: # %bb.0:
+; X86-X87-NEXT: subl $12, %esp
+; X86-X87-NEXT: .cfi_def_cfa_offset 16
+; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-X87-NEXT: fstpl (%esp)
+; X86-X87-NEXT: movl $15, {{[0-9]+}}(%esp)
+; X86-X87-NEXT: calll __powidf2
+; X86-X87-NEXT: addl $12, %esp
+; X86-X87-NEXT: .cfi_def_cfa_offset 4
+; X86-X87-NEXT: retl
+;
+; X86-SSE-LABEL: pow_wrapper_optsize:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: subl $12, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 16
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd %xmm0, (%esp)
+; X86-SSE-NEXT: movl $15, {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: calll __powidf2
+; X86-SSE-NEXT: addl $12, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE-NEXT: retl
+;
+; X64-LABEL: pow_wrapper_optsize:
+; X64: # %bb.0:
+; X64-NEXT: movl $15, %edi
+; X64-NEXT: jmp __powidf2 # TAILCALL
%ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
ret double %ret
}
define double @pow_wrapper_minsize(double %a) minsize {
-; CHECK-LABEL: pow_wrapper_minsize:
-; CHECK: # %bb.0:
-; CHECK-NEXT: pushq $15
-; CHECK: popq %rdi
-; CHECK: jmp
+; X86-X87-LABEL: pow_wrapper_minsize:
+; X86-X87: # %bb.0:
+; X86-X87-NEXT: subl $12, %esp
+; X86-X87-NEXT: .cfi_def_cfa_offset 16
+; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-X87-NEXT: fstpl (%esp)
+; X86-X87-NEXT: movl $15, {{[0-9]+}}(%esp)
+; X86-X87-NEXT: calll __powidf2
+; X86-X87-NEXT: addl $12, %esp
+; X86-X87-NEXT: .cfi_def_cfa_offset 4
+; X86-X87-NEXT: retl
+;
+; X86-SSE-LABEL: pow_wrapper_minsize:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: subl $12, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 16
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE-NEXT: movsd %xmm0, (%esp)
+; X86-SSE-NEXT: movl $15, {{[0-9]+}}(%esp)
+; X86-SSE-NEXT: calll __powidf2
+; X86-SSE-NEXT: addl $12, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE-NEXT: retl
+;
+; X64-LABEL: pow_wrapper_minsize:
+; X64: # %bb.0:
+; X64-NEXT: pushq $15
+; X64-NEXT: .cfi_adjust_cfa_offset 8
+; X64-NEXT: popq %rdi
+; X64-NEXT: .cfi_adjust_cfa_offset -8
+; X64-NEXT: jmp __powidf2 # TAILCALL
%ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
ret double %ret
}
More information about the llvm-commits mailing list