[llvm] bc65352 - [X86][Test] Update tests for `lround` and `llrint` (NFC) (#157807)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 10 01:30:48 PDT 2025
Author: Trevor Gross
Date: 2025-09-10T08:30:43Z
New Revision: bc65352ab5eeabecd6d8dca0ca1d5a628101dfde
URL: https://github.com/llvm/llvm-project/commit/bc65352ab5eeabecd6d8dca0ca1d5a628101dfde
DIFF: https://github.com/llvm/llvm-project/commit/bc65352ab5eeabecd6d8dca0ca1d5a628101dfde.diff
LOG: [X86][Test] Update tests for `lround` and `llrint` (NFC) (#157807)
Apply the following changes:
* Ensure all float types are covered (`f16` and `f128` were often
missing)
* Switch to more straightforward test names
* Remove some CHECK directives that are outdated (prefix changed but the
directive did not get removed)
* Add common check prefixes to merge similar blocks
* Test a more similar set of platforms
* Add missing `nounwind`
* Test `strictfp` for each libcall where possible
This is a pre-test for [1].
[1]: https://github.com/llvm/llvm-project/pull/152684
Added:
Modified:
llvm/test/CodeGen/X86/llrint-conv.ll
llvm/test/CodeGen/X86/llround-conv.ll
llvm/test/CodeGen/X86/lrint-conv-i32.ll
llvm/test/CodeGen/X86/lrint-conv-i64.ll
llvm/test/CodeGen/X86/lround-conv-i32.ll
llvm/test/CodeGen/X86/lround-conv-i64.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/llrint-conv.ll b/llvm/test/CodeGen/X86/llrint-conv.ll
index 7bcf573118538..5f38645f74636 100644
--- a/llvm/test/CodeGen/X86/llrint-conv.ll
+++ b/llvm/test/CodeGen/X86/llrint-conv.ll
@@ -7,47 +7,15 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=X64,X64-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=X64,X64-AVX
-define i64 @testmsxh(half %x) nounwind {
-; X86-NOSSE-LABEL: testmsxh:
-; X86-NOSSE: # %bb.0: # %entry
-; X86-NOSSE-NEXT: pushl %eax
-; X86-NOSSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT: movl %eax, (%esp)
-; X86-NOSSE-NEXT: calll __extendhfsf2
-; X86-NOSSE-NEXT: fstps (%esp)
-; X86-NOSSE-NEXT: calll llrintf
-; X86-NOSSE-NEXT: popl %ecx
-; X86-NOSSE-NEXT: retl
-;
-; X86-SSE2-LABEL: testmsxh:
-; X86-SSE2: # %bb.0: # %entry
-; X86-SSE2-NEXT: pushl %eax
-; X86-SSE2-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
-; X86-SSE2-NEXT: pextrw $0, %xmm0, %eax
-; X86-SSE2-NEXT: movw %ax, (%esp)
-; X86-SSE2-NEXT: calll __extendhfsf2
-; X86-SSE2-NEXT: fstps (%esp)
-; X86-SSE2-NEXT: calll llrintf
-; X86-SSE2-NEXT: popl %ecx
-; X86-SSE2-NEXT: retl
-;
-; X64-SSE-LABEL: testmsxh:
-; X64-SSE: # %bb.0: # %entry
-; X64-SSE-NEXT: pushq %rax
-; X64-SSE-NEXT: callq __extendhfsf2@PLT
-; X64-SSE-NEXT: callq rintf@PLT
-; X64-SSE-NEXT: callq __truncsfhf2@PLT
-; X64-SSE-NEXT: callq __extendhfsf2@PLT
-; X64-SSE-NEXT: cvttss2si %xmm0, %rax
-; X64-SSE-NEXT: popq %rcx
-; X64-SSE-NEXT: retq
-entry:
- %0 = tail call i64 @llvm.llrint.i64.f16(half %x)
- ret i64 %0
-}
+; FIXME: crash
+; define i64 @test_llrint_i64_f16(half %x) nounwind {
+; entry:
+; %0 = tail call i64 @llvm.llrint.i64.f16(half %x)
+; ret i64 %0
+; }
-define i64 @testmsxs(float %x) nounwind {
-; X86-NOSSE-LABEL: testmsxs:
+define i64 @test_llrint_i64_f32(float %x) nounwind {
+; X86-NOSSE-LABEL: test_llrint_i64_f32:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %ebp
; X86-NOSSE-NEXT: movl %esp, %ebp
@@ -61,7 +29,7 @@ define i64 @testmsxs(float %x) nounwind {
; X86-NOSSE-NEXT: popl %ebp
; X86-NOSSE-NEXT: retl
;
-; X86-SSE2-LABEL: testmsxs:
+; X86-SSE2-LABEL: test_llrint_i64_f32:
; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: movl %esp, %ebp
@@ -77,7 +45,7 @@ define i64 @testmsxs(float %x) nounwind {
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-AVX-LABEL: testmsxs:
+; X86-AVX-LABEL: test_llrint_i64_f32:
; X86-AVX: # %bb.0: # %entry
; X86-AVX-NEXT: pushl %ebp
; X86-AVX-NEXT: movl %esp, %ebp
@@ -93,12 +61,12 @@ define i64 @testmsxs(float %x) nounwind {
; X86-AVX-NEXT: popl %ebp
; X86-AVX-NEXT: retl
;
-; X64-SSE-LABEL: testmsxs:
+; X64-SSE-LABEL: test_llrint_i64_f32:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtss2si %xmm0, %rax
; X64-SSE-NEXT: retq
;
-; X64-AVX-LABEL: testmsxs:
+; X64-AVX-LABEL: test_llrint_i64_f32:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtss2si %xmm0, %rax
; X64-AVX-NEXT: retq
@@ -107,8 +75,8 @@ entry:
ret i64 %0
}
-define i64 @testmsxd(double %x) nounwind {
-; X86-NOSSE-LABEL: testmsxd:
+define i64 @test_llrint_i64_f64(double %x) nounwind {
+; X86-NOSSE-LABEL: test_llrint_i64_f64:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %ebp
; X86-NOSSE-NEXT: movl %esp, %ebp
@@ -122,7 +90,7 @@ define i64 @testmsxd(double %x) nounwind {
; X86-NOSSE-NEXT: popl %ebp
; X86-NOSSE-NEXT: retl
;
-; X86-SSE2-LABEL: testmsxd:
+; X86-SSE2-LABEL: test_llrint_i64_f64:
; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: movl %esp, %ebp
@@ -138,7 +106,7 @@ define i64 @testmsxd(double %x) nounwind {
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-AVX-LABEL: testmsxd:
+; X86-AVX-LABEL: test_llrint_i64_f64:
; X86-AVX: # %bb.0: # %entry
; X86-AVX-NEXT: pushl %ebp
; X86-AVX-NEXT: movl %esp, %ebp
@@ -154,12 +122,12 @@ define i64 @testmsxd(double %x) nounwind {
; X86-AVX-NEXT: popl %ebp
; X86-AVX-NEXT: retl
;
-; X64-SSE-LABEL: testmsxd:
+; X64-SSE-LABEL: test_llrint_i64_f64:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: retq
;
-; X64-AVX-LABEL: testmsxd:
+; X64-AVX-LABEL: test_llrint_i64_f64:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT: retq
@@ -168,8 +136,8 @@ entry:
ret i64 %0
}
-define i64 @testmsll(x86_fp80 %x) nounwind {
-; X86-LABEL: testmsll:
+define i64 @test_llrint_i64_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: test_llrint_i64_f80:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
@@ -183,7 +151,7 @@ define i64 @testmsll(x86_fp80 %x) nounwind {
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; X64-LABEL: testmsll:
+; X64-LABEL: test_llrint_i64_f80:
; X64: # %bb.0: # %entry
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fistpll -{{[0-9]+}}(%rsp)
@@ -195,8 +163,8 @@ entry:
}
; FIXME(#44744): incorrect libcall
-define i64 @testmslq(fp128 %x) nounwind {
-; X86-NOSSE-LABEL: testmslq:
+define i64 @test_llrint_i64_f128(fp128 %x) nounwind {
+; X86-NOSSE-LABEL: test_llrint_i64_f128:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %ebp
; X86-NOSSE-NEXT: movl %esp, %ebp
@@ -212,7 +180,7 @@ define i64 @testmslq(fp128 %x) nounwind {
; X86-NOSSE-NEXT: popl %ebp
; X86-NOSSE-NEXT: retl
;
-; X86-SSE2-LABEL: testmslq:
+; X86-SSE2-LABEL: test_llrint_i64_f128:
; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: movl %esp, %ebp
@@ -228,7 +196,7 @@ define i64 @testmslq(fp128 %x) nounwind {
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-AVX-LABEL: testmslq:
+; X86-AVX-LABEL: test_llrint_i64_f128:
; X86-AVX: # %bb.0: # %entry
; X86-AVX-NEXT: pushl %ebp
; X86-AVX-NEXT: movl %esp, %ebp
@@ -241,11 +209,181 @@ define i64 @testmslq(fp128 %x) nounwind {
; X86-AVX-NEXT: popl %ebp
; X86-AVX-NEXT: retl
;
-; X64-LABEL: testmslq:
+; X64-LABEL: test_llrint_i64_f128:
; X64: # %bb.0: # %entry
; X64-NEXT: jmp llrintl@PLT # TAILCALL
entry:
- %0 = tail call i64 @llvm.llrint.i64.fp128(fp128 %x)
+ %0 = tail call i64 @llvm.llrint.i64.f128(fp128 %x)
+ ret i64 %0
+}
+
+; FIXME: crash
+; define i64 @test_llrint_i64_f16_strict(half %x) nounwind strictfp {
+; entry:
+; %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i64 %0
+; }
+
+define i64 @test_llrint_i64_f32_strict(float %x) nounwind strictfp {
+; X86-NOSSE-LABEL: test_llrint_i64_f32_strict:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: pushl %eax
+; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstps (%esp)
+; X86-NOSSE-NEXT: wait
+; X86-NOSSE-NEXT: calll llrintf
+; X86-NOSSE-NEXT: popl %ecx
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_llrint_i64_f32_strict:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: calll llrintf
+; X86-SSE2-NEXT: popl %ecx
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: test_llrint_i64_f32_strict:
+; X86-AVX: # %bb.0: # %entry
+; X86-AVX-NEXT: pushl %eax
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX-NEXT: calll llrintf
+; X86-AVX-NEXT: popl %ecx
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: test_llrint_i64_f32_strict:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq llrintf@PLT
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i64 %0
+}
+
+define i64 @test_llrint_i64_f64_strict(double %x) nounwind strictfp {
+; X86-NOSSE-LABEL: test_llrint_i64_f64_strict:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl (%esp)
+; X86-NOSSE-NEXT: wait
+; X86-NOSSE-NEXT: calll llrint
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_llrint_i64_f64_strict:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: calll llrint
+; X86-SSE2-NEXT: addl $8, %esp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: test_llrint_i64_f64_strict:
+; X86-AVX: # %bb.0: # %entry
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: calll llrint
+; X86-AVX-NEXT: addl $8, %esp
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: test_llrint_i64_f64_strict:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq llrint@PLT
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i64 %0
+}
+
+define i64 @test_llrint_i64_f80_strict(x86_fp80 %x) nounwind strictfp {
+; X86-LABEL: test_llrint_i64_f80_strict:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: fldt {{[0-9]+}}(%esp)
+; X86-NEXT: fstpt (%esp)
+; X86-NEXT: wait
+; X86-NEXT: calll llrintl
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: test_llrint_i64_f80_strict:
+; X64: # %bb.0: # %entry
+; X64-NEXT: subq $24, %rsp
+; X64-NEXT: fldt {{[0-9]+}}(%rsp)
+; X64-NEXT: fstpt (%rsp)
+; X64-NEXT: wait
+; X64-NEXT: callq llrintl@PLT
+; X64-NEXT: addq $24, %rsp
+; X64-NEXT: retq
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i64 %0
+}
+
+; FIXME(#44744): incorrect libcall
+define i64 @test_llrint_i64_f128_strict(fp128 %x) nounwind strictfp {
+; X86-NOSSE-LABEL: test_llrint_i64_f128_strict:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-16, %esp
+; X86-NOSSE-NEXT: subl $16, %esp
+; X86-NOSSE-NEXT: pushl 20(%ebp)
+; X86-NOSSE-NEXT: pushl 16(%ebp)
+; X86-NOSSE-NEXT: pushl 12(%ebp)
+; X86-NOSSE-NEXT: pushl 8(%ebp)
+; X86-NOSSE-NEXT: calll llrintl
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_llrint_i64_f128_strict:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-16, %esp
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: pushl 20(%ebp)
+; X86-SSE2-NEXT: pushl 16(%ebp)
+; X86-SSE2-NEXT: pushl 12(%ebp)
+; X86-SSE2-NEXT: pushl 8(%ebp)
+; X86-SSE2-NEXT: calll llrintl
+; X86-SSE2-NEXT: addl $16, %esp
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: test_llrint_i64_f128_strict:
+; X86-AVX: # %bb.0: # %entry
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-16, %esp
+; X86-AVX-NEXT: subl $32, %esp
+; X86-AVX-NEXT: vmovups 8(%ebp), %xmm0
+; X86-AVX-NEXT: vmovups %xmm0, (%esp)
+; X86-AVX-NEXT: calll llrintl
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: test_llrint_i64_f128_strict:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq llrintl@PLT
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
ret i64 %0
}
diff --git a/llvm/test/CodeGen/X86/llround-conv.ll b/llvm/test/CodeGen/X86/llround-conv.ll
index 19a980b72809e..ef4df82e9e57e 100644
--- a/llvm/test/CodeGen/X86/llround-conv.ll
+++ b/llvm/test/CodeGen/X86/llround-conv.ll
@@ -1,88 +1,84 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefixes=X86,X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefixes=X86,X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
-define i64 @testmsxs(float %x) {
-; X86-LABEL: testmsxs:
-; X86: # %bb.0: # %entry
-; X86-NEXT: pushl %eax
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: flds {{[0-9]+}}(%esp)
-; X86-NEXT: fstps (%esp)
-; X86-NEXT: calll llroundf
-; X86-NEXT: popl %ecx
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
+; FIXME: crash
+; define i64 @test_llround_f16(half %x) nounwind {
+; %conv = tail call i64 @llvm.llround.f16(half %x)
+; ret i64 %conv
+; }
+
+define i64 @test_llround_f32(float %x) nounwind {
+; X86-NOSSE-LABEL: test_llround_f32:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %eax
+; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstps (%esp)
+; X86-NOSSE-NEXT: calll llroundf
+; X86-NOSSE-NEXT: popl %ecx
+; X86-NOSSE-NEXT: retl
;
-; SSE2-LABEL: testmsxs:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: pushl %eax
-; SSE2-NEXT: .cfi_def_cfa_offset 8
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: movss %xmm0, (%esp)
-; SSE2-NEXT: calll llroundf
-; SSE2-NEXT: popl %ecx
-; SSE2-NEXT: .cfi_def_cfa_offset 4
-; SSE2-NEXT: retl
+; X86-SSE2-LABEL: test_llround_f32:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: calll llroundf
+; X86-SSE2-NEXT: popl %ecx
+; X86-SSE2-NEXT: retl
;
-; GISEL-X86-LABEL: testmsxs:
-; GISEL-X86: # %bb.0: # %entry
+; X64-LABEL: test_llround_f32:
+; X64: # %bb.0:
+; X64-NEXT: jmp llroundf@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_llround_f32:
+; GISEL-X86: # %bb.0:
; GISEL-X86-NEXT: subl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 16
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; GISEL-X86-NEXT: movl %eax, (%esp)
; GISEL-X86-NEXT: calll llroundf
; GISEL-X86-NEXT: addl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 4
; GISEL-X86-NEXT: retl
;
-; X64-LABEL: testmsxs:
-; X64: # %bb.0: # %entry
-; X64-NEXT: jmp llroundf@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsxs:
-; GISEL-X64: # %bb.0: # %entry
+; GISEL-X64-LABEL: test_llround_f32:
+; GISEL-X64: # %bb.0:
; GISEL-X64-NEXT: pushq %rax
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 16
; GISEL-X64-NEXT: callq llroundf
; GISEL-X64-NEXT: popq %rcx
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 8
; GISEL-X64-NEXT: retq
-entry:
- %0 = tail call i64 @llvm.llround.f32(float %x)
- ret i64 %0
+ %conv = tail call i64 @llvm.llround.f32(float %x)
+ ret i64 %conv
}
-define i64 @testmsxd(double %x) {
-; X86-LABEL: testmsxd:
-; X86: # %bb.0: # %entry
-; X86-NEXT: subl $8, %esp
-; X86-NEXT: .cfi_def_cfa_offset 12
-; X86-NEXT: fldl {{[0-9]+}}(%esp)
-; X86-NEXT: fstpl (%esp)
-; X86-NEXT: calll llround
-; X86-NEXT: addl $8, %esp
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
+define i64 @test_llround_f64(double %x) nounwind {
+; X86-NOSSE-LABEL: test_llround_f64:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl (%esp)
+; X86-NOSSE-NEXT: calll llround
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
;
-; SSE2-LABEL: testmsxd:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: subl $8, %esp
-; SSE2-NEXT: .cfi_def_cfa_offset 12
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: movsd %xmm0, (%esp)
-; SSE2-NEXT: calll llround
-; SSE2-NEXT: addl $8, %esp
-; SSE2-NEXT: .cfi_def_cfa_offset 4
-; SSE2-NEXT: retl
+; X86-SSE2-LABEL: test_llround_f64:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: calll llround
+; X86-SSE2-NEXT: addl $8, %esp
+; X86-SSE2-NEXT: retl
;
-; GISEL-X86-LABEL: testmsxd:
-; GISEL-X86: # %bb.0: # %entry
+; X64-LABEL: test_llround_f64:
+; X64: # %bb.0:
+; X64-NEXT: jmp llround@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_llround_f64:
+; GISEL-X86: # %bb.0:
; GISEL-X86-NEXT: subl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 16
; GISEL-X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GISEL-X86-NEXT: movl 4(%eax), %eax
@@ -92,111 +88,140 @@ define i64 @testmsxd(double %x) {
; GISEL-X86-NEXT: movl %eax, 4(%edx)
; GISEL-X86-NEXT: calll llround
; GISEL-X86-NEXT: addl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 4
; GISEL-X86-NEXT: retl
;
-; X64-LABEL: testmsxd:
-; X64: # %bb.0: # %entry
-; X64-NEXT: jmp llround@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsxd:
-; GISEL-X64: # %bb.0: # %entry
+; GISEL-X64-LABEL: test_llround_f64:
+; GISEL-X64: # %bb.0:
; GISEL-X64-NEXT: pushq %rax
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 16
; GISEL-X64-NEXT: callq llround
; GISEL-X64-NEXT: popq %rcx
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 8
; GISEL-X64-NEXT: retq
-entry:
- %0 = tail call i64 @llvm.llround.f64(double %x)
- ret i64 %0
+ %conv = tail call i64 @llvm.llround.f64(double %x)
+ ret i64 %conv
}
-define i64 @testmsll(x86_fp80 %x) {
-; X86-LABEL: testmsll:
-; X86: # %bb.0: # %entry
+define i64 @test_llround_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: test_llround_f80:
+; X86: # %bb.0:
; X86-NEXT: subl $12, %esp
-; X86-NEXT: .cfi_def_cfa_offset 16
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fstpt (%esp)
; X86-NEXT: calll llroundl
; X86-NEXT: addl $12, %esp
-; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
;
-; SSE2-LABEL: testmsll:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: subl $12, %esp
-; SSE2-NEXT: .cfi_def_cfa_offset 16
-; SSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE2-NEXT: fstpt (%esp)
-; SSE2-NEXT: calll llroundl
-; SSE2-NEXT: addl $12, %esp
-; SSE2-NEXT: .cfi_def_cfa_offset 4
-; SSE2-NEXT: retl
+; X64-LABEL: test_llround_f80:
+; X64: # %bb.0:
+; X64-NEXT: jmp llroundl@PLT # TAILCALL
;
-; GISEL-X86-LABEL: testmsll:
-; GISEL-X86: # %bb.0: # %entry
+; GISEL-X86-LABEL: test_llround_f80:
+; GISEL-X86: # %bb.0:
; GISEL-X86-NEXT: subl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 16
; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp)
; GISEL-X86-NEXT: fstpt (%esp)
; GISEL-X86-NEXT: calll llroundl
; GISEL-X86-NEXT: addl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 4
; GISEL-X86-NEXT: retl
;
-; X64-LABEL: testmsll:
-; X64: # %bb.0: # %entry
-; X64-NEXT: jmp llroundl@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsll:
-; GISEL-X64: # %bb.0: # %entry
+; GISEL-X64-LABEL: test_llround_f80:
+; GISEL-X64: # %bb.0:
; GISEL-X64-NEXT: subq $24, %rsp
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 32
; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp)
; GISEL-X64-NEXT: fstpt (%rsp)
; GISEL-X64-NEXT: callq llroundl
; GISEL-X64-NEXT: addq $24, %rsp
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 8
; GISEL-X64-NEXT: retq
-entry:
- %0 = tail call i64 @llvm.llround.f80(x86_fp80 %x)
- ret i64 %0
+ %conv = tail call i64 @llvm.llround.f80(x86_fp80 %x)
+ ret i64 %conv
}
-define i64 @test_llround_i64_f32(float %x) nounwind {
-; X86-LABEL: test_llround_i64_f32:
+; FIXME(#44744): incorrect libcall
+define i64 @test_llround_f128(fp128 %x) nounwind {
+; X86-LABEL: test_llround_f128:
; X86: # %bb.0:
-; X86-NEXT: pushl %eax
-; X86-NEXT: flds {{[0-9]+}}(%esp)
-; X86-NEXT: fstps (%esp)
-; X86-NEXT: calll llroundf
-; X86-NEXT: popl %ecx
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: pushl 20(%ebp)
+; X86-NEXT: pushl 16(%ebp)
+; X86-NEXT: pushl 12(%ebp)
+; X86-NEXT: pushl 8(%ebp)
+; X86-NEXT: calll llroundl
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE2-LABEL: test_llround_i64_f32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pushl %eax
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: movss %xmm0, (%esp)
-; SSE2-NEXT: calll llroundf
-; SSE2-NEXT: popl %ecx
-; SSE2-NEXT: retl
+; X64-LABEL: test_llround_f128:
+; X64: # %bb.0:
+; X64-NEXT: jmp llroundl@PLT # TAILCALL
;
-; GISEL-X86-LABEL: test_llround_i64_f32:
+; GISEL-X86-LABEL: test_llround_f128:
; GISEL-X86: # %bb.0:
-; GISEL-X86-NEXT: subl $12, %esp
+; GISEL-X86-NEXT: pushl %esi
+; GISEL-X86-NEXT: subl $24, %esp
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; GISEL-X86-NEXT: movl %eax, (%esp)
-; GISEL-X86-NEXT: calll llroundf
-; GISEL-X86-NEXT: addl $12, %esp
+; GISEL-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: calll llroundf128
+; GISEL-X86-NEXT: addl $24, %esp
+; GISEL-X86-NEXT: popl %esi
; GISEL-X86-NEXT: retl
;
+; GISEL-X64-LABEL: test_llround_f128:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: pushq %rax
+; GISEL-X64-NEXT: callq llroundf128
+; GISEL-X64-NEXT: popq %rcx
+; GISEL-X64-NEXT: retq
+ %conv = tail call i64 @llvm.llround.f128(fp128 %x)
+ ret i64 %conv
+}
+
+; FIXME: crash
+; define i64 @test_llround_i64_f16(half %x) nounwind {
+; %conv = call i64 @llvm.llround.i64.f16(half %x)
+; ret i64 %conv
+; }
+
+define i64 @test_llround_i64_f32(float %x) nounwind {
+; X86-NOSSE-LABEL: test_llround_i64_f32:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %eax
+; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstps (%esp)
+; X86-NOSSE-NEXT: calll llroundf
+; X86-NOSSE-NEXT: popl %ecx
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_llround_i64_f32:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: calll llroundf
+; X86-SSE2-NEXT: popl %ecx
+; X86-SSE2-NEXT: retl
+;
; X64-LABEL: test_llround_i64_f32:
; X64: # %bb.0:
; X64-NEXT: jmp llroundf@PLT # TAILCALL
;
+; GISEL-X86-LABEL: test_llround_i64_f32:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: subl $12, %esp
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: movl %eax, (%esp)
+; GISEL-X86-NEXT: calll llroundf
+; GISEL-X86-NEXT: addl $12, %esp
+; GISEL-X86-NEXT: retl
+;
; GISEL-X64-LABEL: test_llround_i64_f32:
; GISEL-X64: # %bb.0:
; GISEL-X64-NEXT: pushq %rax
@@ -208,23 +233,27 @@ define i64 @test_llround_i64_f32(float %x) nounwind {
}
define i64 @test_llround_i64_f64(double %x) nounwind {
-; X86-LABEL: test_llround_i64_f64:
-; X86: # %bb.0:
-; X86-NEXT: subl $8, %esp
-; X86-NEXT: fldl {{[0-9]+}}(%esp)
-; X86-NEXT: fstpl (%esp)
-; X86-NEXT: calll llround
-; X86-NEXT: addl $8, %esp
-; X86-NEXT: retl
+; X86-NOSSE-LABEL: test_llround_i64_f64:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl (%esp)
+; X86-NOSSE-NEXT: calll llround
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_llround_i64_f64:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: calll llround
+; X86-SSE2-NEXT: addl $8, %esp
+; X86-SSE2-NEXT: retl
;
-; SSE2-LABEL: test_llround_i64_f64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: subl $8, %esp
-; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: movsd %xmm0, (%esp)
-; SSE2-NEXT: calll llround
-; SSE2-NEXT: addl $8, %esp
-; SSE2-NEXT: retl
+; X64-LABEL: test_llround_i64_f64:
+; X64: # %bb.0:
+; X64-NEXT: jmp llround@PLT # TAILCALL
;
; GISEL-X86-LABEL: test_llround_i64_f64:
; GISEL-X86: # %bb.0:
@@ -240,10 +269,6 @@ define i64 @test_llround_i64_f64(double %x) nounwind {
; GISEL-X86-NEXT: addl $12, %esp
; GISEL-X86-NEXT: retl
;
-; X64-LABEL: test_llround_i64_f64:
-; X64: # %bb.0:
-; X64-NEXT: jmp llround@PLT # TAILCALL
-;
; GISEL-X64-LABEL: test_llround_i64_f64:
; GISEL-X64: # %bb.0:
; GISEL-X64-NEXT: pushq %rax
@@ -264,14 +289,9 @@ define i64 @test_llround_i64_f80(x86_fp80 %x) nounwind {
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
;
-; SSE2-LABEL: test_llround_i64_f80:
-; SSE2: # %bb.0:
-; SSE2-NEXT: subl $12, %esp
-; SSE2-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE2-NEXT: fstpt (%esp)
-; SSE2-NEXT: calll llroundl
-; SSE2-NEXT: addl $12, %esp
-; SSE2-NEXT: retl
+; X64-LABEL: test_llround_i64_f80:
+; X64: # %bb.0:
+; X64-NEXT: jmp llroundl@PLT # TAILCALL
;
; GISEL-X86-LABEL: test_llround_i64_f80:
; GISEL-X86: # %bb.0:
@@ -282,10 +302,6 @@ define i64 @test_llround_i64_f80(x86_fp80 %x) nounwind {
; GISEL-X86-NEXT: addl $12, %esp
; GISEL-X86-NEXT: retl
;
-; X64-LABEL: test_llround_i64_f80:
-; X64: # %bb.0:
-; X64-NEXT: jmp llroundl@PLT # TAILCALL
-;
; GISEL-X64-LABEL: test_llround_i64_f80:
; GISEL-X64: # %bb.0:
; GISEL-X64-NEXT: subq $24, %rsp
@@ -297,3 +313,79 @@ define i64 @test_llround_i64_f80(x86_fp80 %x) nounwind {
%conv = call i64 @llvm.llround.i64.f80(x86_fp80 %x)
ret i64 %conv
}
+
+; FIXME(#44744): incorrect libcall
+define i64 @test_llround_i64_f128(fp128 %x) nounwind {
+; X86-LABEL: test_llround_i64_f128:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: pushl 20(%ebp)
+; X86-NEXT: pushl 16(%ebp)
+; X86-NEXT: pushl 12(%ebp)
+; X86-NEXT: pushl 8(%ebp)
+; X86-NEXT: calll llroundl
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: test_llround_i64_f128:
+; X64: # %bb.0:
+; X64-NEXT: jmp llroundl@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_llround_i64_f128:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: pushl %esi
+; GISEL-X86-NEXT: subl $24, %esp
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; GISEL-X86-NEXT: movl %eax, (%esp)
+; GISEL-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: calll llroundf128
+; GISEL-X86-NEXT: addl $24, %esp
+; GISEL-X86-NEXT: popl %esi
+; GISEL-X86-NEXT: retl
+;
+; GISEL-X64-LABEL: test_llround_i64_f128:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: pushq %rax
+; GISEL-X64-NEXT: callq llroundf128
+; GISEL-X64-NEXT: popq %rcx
+; GISEL-X64-NEXT: retq
+ %conv = call i64 @llvm.llround.i64.f128(fp128 %x)
+ ret i64 %conv
+}
+
+; FIXME: not yet implemented for global isel
+; define i64 @test_llround_i64_f16_strict(half %x) nounwind strictfp {
+; %conv = call i64 @llvm.experimental.constrained.llround.i64.f16(half %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i64 %conv
+; }
+
+; define i64 @test_llround_i64_f32_strict(float %x) nounwind strictfp {
+; %conv = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i64 %conv
+; }
+
+; define i64 @test_llround_i64_f64_strict(double %x) nounwind strictfp {
+; %conv = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i64 %conv
+; }
+
+; define i64 @test_llround_i64_f80_strict(x86_fp80 %x) nounwind strictfp {
+; %conv = call i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i64 %conv
+; }
+
+; ; FIXME(#44744): incorrect libcall
+; define i64 @test_llround_i64_f128_strict(fp128 %x) nounwind strictfp {
+; %conv = call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i64 %conv
+; }
diff --git a/llvm/test/CodeGen/X86/lrint-conv-i32.ll b/llvm/test/CodeGen/X86/lrint-conv-i32.ll
index 3c50aea1095f4..2b99b4c50f58a 100644
--- a/llvm/test/CodeGen/X86/lrint-conv-i32.ll
+++ b/llvm/test/CodeGen/X86/lrint-conv-i32.ll
@@ -8,15 +8,15 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=X64,X64-AVX
; FIXME: crash
-; define i32 @testmswh(half %x) nounwind {
+; define i32 @test_lrint_i32_f16(half %x) nounwind {
; entry:
; %0 = tail call i32 @llvm.lrint.i32.f16(half %x)
; ret i32 %0
; }
-define i32 @testmsws(float %x) nounwind {
-; X86-NOSSE-LABEL: testmsws:
-; X86-NOSSE: # %bb.0: # %entry
+define i32 @test_lrint_i32_f32(float %x) nounwind {
+; X86-NOSSE-LABEL: test_lrint_i32_f32:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl %eax
; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fistpl (%esp)
@@ -24,33 +24,32 @@ define i32 @testmsws(float %x) nounwind {
; X86-NOSSE-NEXT: popl %ecx
; X86-NOSSE-NEXT: retl
;
-; X86-SSE2-LABEL: testmsws:
-; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-LABEL: test_lrint_i32_f32:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtss2si {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: retl
;
-; X86-AVX-LABEL: testmsws:
-; X86-AVX: # %bb.0: # %entry
+; X86-AVX-LABEL: test_lrint_i32_f32:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vcvtss2si {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: retl
;
-; X64-SSE-LABEL: testmsws:
-; X64-SSE: # %bb.0: # %entry
+; X64-SSE-LABEL: test_lrint_i32_f32:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtss2si %xmm0, %eax
; X64-SSE-NEXT: retq
;
-; X64-AVX-LABEL: testmsws:
-; X64-AVX: # %bb.0: # %entry
+; X64-AVX-LABEL: test_lrint_i32_f32:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtss2si %xmm0, %eax
; X64-AVX-NEXT: retq
-entry:
- %0 = tail call i32 @llvm.lrint.i32.f32(float %x)
- ret i32 %0
+ %conv = tail call i32 @llvm.lrint.i32.f32(float %x)
+ ret i32 %conv
}
-define i32 @testmswd(double %x) nounwind {
-; X86-NOSSE-LABEL: testmswd:
-; X86-NOSSE: # %bb.0: # %entry
+define i32 @test_lrint_i32_f64(double %x) nounwind {
+; X86-NOSSE-LABEL: test_lrint_i32_f64:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl %eax
; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fistpl (%esp)
@@ -58,33 +57,32 @@ define i32 @testmswd(double %x) nounwind {
; X86-NOSSE-NEXT: popl %ecx
; X86-NOSSE-NEXT: retl
;
-; X86-SSE2-LABEL: testmswd:
-; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-LABEL: test_lrint_i32_f64:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtsd2si {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: retl
;
-; X86-AVX-LABEL: testmswd:
-; X86-AVX: # %bb.0: # %entry
+; X86-AVX-LABEL: test_lrint_i32_f64:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vcvtsd2si {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: retl
;
-; X64-SSE-LABEL: testmswd:
-; X64-SSE: # %bb.0: # %entry
+; X64-SSE-LABEL: test_lrint_i32_f64:
+; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtsd2si %xmm0, %eax
; X64-SSE-NEXT: retq
;
-; X64-AVX-LABEL: testmswd:
-; X64-AVX: # %bb.0: # %entry
+; X64-AVX-LABEL: test_lrint_i32_f64:
+; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtsd2si %xmm0, %eax
; X64-AVX-NEXT: retq
-entry:
- %0 = tail call i32 @llvm.lrint.i32.f64(double %x)
- ret i32 %0
+ %conv = tail call i32 @llvm.lrint.i32.f64(double %x)
+ ret i32 %conv
}
-define i32 @testmsll(x86_fp80 %x) nounwind {
-; X86-LABEL: testmsll:
-; X86: # %bb.0: # %entry
+define i32 @test_lrint_i32_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: test_lrint_i32_f80:
+; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fistpl (%esp)
@@ -92,21 +90,20 @@ define i32 @testmsll(x86_fp80 %x) nounwind {
; X86-NEXT: popl %ecx
; X86-NEXT: retl
;
-; X64-LABEL: testmsll:
-; X64: # %bb.0: # %entry
+; X64-LABEL: test_lrint_i32_f80:
+; X64: # %bb.0:
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fistpl -{{[0-9]+}}(%rsp)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %eax
; X64-NEXT: retq
-entry:
- %0 = tail call i32 @llvm.lrint.i32.f80(x86_fp80 %x)
- ret i32 %0
+ %conv = tail call i32 @llvm.lrint.i32.f80(x86_fp80 %x)
+ ret i32 %conv
}
; FIXME(#44744): incorrect libcall
-define i32 @testmswq(fp128 %x) nounwind {
-; X86-NOSSE-LABEL: testmswq:
-; X86-NOSSE: # %bb.0: # %entry
+define i32 @test_lrint_i32_f128(fp128 %x) nounwind {
+; X86-NOSSE-LABEL: test_lrint_i32_f128:
+; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl %ebp
; X86-NOSSE-NEXT: movl %esp, %ebp
; X86-NOSSE-NEXT: andl $-16, %esp
@@ -121,8 +118,8 @@ define i32 @testmswq(fp128 %x) nounwind {
; X86-NOSSE-NEXT: popl %ebp
; X86-NOSSE-NEXT: retl
;
-; X86-SSE2-LABEL: testmswq:
-; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-LABEL: test_lrint_i32_f128:
+; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: movl %esp, %ebp
; X86-SSE2-NEXT: andl $-16, %esp
@@ -137,8 +134,8 @@ define i32 @testmswq(fp128 %x) nounwind {
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: retl
;
-; X86-AVX-LABEL: testmswq:
-; X86-AVX: # %bb.0: # %entry
+; X86-AVX-LABEL: test_lrint_i32_f128:
+; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebp
; X86-AVX-NEXT: movl %esp, %ebp
; X86-AVX-NEXT: andl $-16, %esp
@@ -150,12 +147,176 @@ define i32 @testmswq(fp128 %x) nounwind {
; X86-AVX-NEXT: popl %ebp
; X86-AVX-NEXT: retl
;
-; X64-LABEL: testmswq:
-; X64: # %bb.0: # %entry
+; X64-LABEL: test_lrint_i32_f128:
+; X64: # %bb.0:
; X64-NEXT: jmp lrintl@PLT # TAILCALL
-entry:
- %0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
- ret i32 %0
+ %conv = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
+ ret i32 %conv
+}
+
+; FIXME: crash
+; define i32 @test_lrint_i32_f16_strict(half %x) nounwind strictfp {
+; %conv = tail call i32 @llvm.experimental.constrained.lrint.i32.f16(half %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i32 %conv
+; }
+
+define i32 @test_lrint_i32_f32_strict(float %x) nounwind strictfp {
+; X86-NOSSE-LABEL: test_lrint_i32_f32_strict:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %eax
+; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstps (%esp)
+; X86-NOSSE-NEXT: wait
+; X86-NOSSE-NEXT: calll lrintf
+; X86-NOSSE-NEXT: popl %ecx
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_lrint_i32_f32_strict:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: calll lrintf
+; X86-SSE2-NEXT: popl %ecx
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: test_lrint_i32_f32_strict:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %eax
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX-NEXT: calll lrintf
+; X86-AVX-NEXT: popl %ecx
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: test_lrint_i32_f32_strict:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq lrintf@PLT
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %conv = tail call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i32 %conv
+}
+
+define i32 @test_lrint_i32_f64_strict(double %x) nounwind strictfp {
+; X86-NOSSE-LABEL: test_lrint_i32_f64_strict:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl (%esp)
+; X86-NOSSE-NEXT: wait
+; X86-NOSSE-NEXT: calll lrint
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_lrint_i32_f64_strict:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: calll lrint
+; X86-SSE2-NEXT: addl $8, %esp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: test_lrint_i32_f64_strict:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: calll lrint
+; X86-AVX-NEXT: addl $8, %esp
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: test_lrint_i32_f64_strict:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq lrint@PLT
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %conv = tail call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i32 %conv
+}
+
+define i32 @test_lrint_i32_f80_strict(x86_fp80 %x) nounwind strictfp {
+; X86-LABEL: test_lrint_i32_f80_strict:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: fldt {{[0-9]+}}(%esp)
+; X86-NEXT: fstpt (%esp)
+; X86-NEXT: wait
+; X86-NEXT: calll lrintl
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: test_lrint_i32_f80_strict:
+; X64: # %bb.0:
+; X64-NEXT: subq $24, %rsp
+; X64-NEXT: fldt {{[0-9]+}}(%rsp)
+; X64-NEXT: fstpt (%rsp)
+; X64-NEXT: wait
+; X64-NEXT: callq lrintl@PLT
+; X64-NEXT: addq $24, %rsp
+; X64-NEXT: retq
+ %conv = tail call i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i32 %conv
+}
+
+; FIXME(#44744): incorrect libcall
+define i32 @test_lrint_i32_f128_strict(fp128 %x) nounwind strictfp {
+; X86-NOSSE-LABEL: test_lrint_i32_f128_strict:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-16, %esp
+; X86-NOSSE-NEXT: subl $16, %esp
+; X86-NOSSE-NEXT: pushl 20(%ebp)
+; X86-NOSSE-NEXT: pushl 16(%ebp)
+; X86-NOSSE-NEXT: pushl 12(%ebp)
+; X86-NOSSE-NEXT: pushl 8(%ebp)
+; X86-NOSSE-NEXT: calll lrintl
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_lrint_i32_f128_strict:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-16, %esp
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: pushl 20(%ebp)
+; X86-SSE2-NEXT: pushl 16(%ebp)
+; X86-SSE2-NEXT: pushl 12(%ebp)
+; X86-SSE2-NEXT: pushl 8(%ebp)
+; X86-SSE2-NEXT: calll lrintl
+; X86-SSE2-NEXT: addl $16, %esp
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: test_lrint_i32_f128_strict:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-16, %esp
+; X86-AVX-NEXT: subl $32, %esp
+; X86-AVX-NEXT: vmovups 8(%ebp), %xmm0
+; X86-AVX-NEXT: vmovups %xmm0, (%esp)
+; X86-AVX-NEXT: calll lrintl
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: test_lrint_i32_f128_strict:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq lrintl@PLT
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %conv = tail call i32 @llvm.experimental.constrained.lrint.i32.f128(fp128 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i32 %conv
}
declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/lrint-conv-i64.ll b/llvm/test/CodeGen/X86/lrint-conv-i64.ll
index 2ba1500df0b6e..731c03bf0d747 100644
--- a/llvm/test/CodeGen/X86/lrint-conv-i64.ll
+++ b/llvm/test/CodeGen/X86/lrint-conv-i64.ll
@@ -1,92 +1,311 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefixes=X86,X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefixes=X86,X86-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=CHECK,AVX
-define i64 @testmsxh(half %x) nounwind {
-; SSE-LABEL: testmsxh:
-; SSE: # %bb.0: # %entry
-; SSE-NEXT: pushq %rax
-; SSE-NEXT: callq __extendhfsf2@PLT
-; SSE-NEXT: callq rintf@PLT
-; SSE-NEXT: callq __truncsfhf2@PLT
-; SSE-NEXT: callq __extendhfsf2@PLT
-; SSE-NEXT: cvttss2si %xmm0, %rax
-; SSE-NEXT: popq %rcx
-; SSE-NEXT: retq
-entry:
- %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
- ret i64 %0
-}
+; FIXME: crash
+; define i64 @test_lrint_i64_f16(half %x) nounwind {
+; %conv = tail call i64 @llvm.lrint.i64.f16(half %x)
+; ret i64 %conv
+; }
-define i64 @testmsxs(float %x) nounwind {
-; SSE-LABEL: testmsxs:
-; SSE: # %bb.0: # %entry
+define i64 @test_lrint_i64_f32(float %x) nounwind {
+; X86-NOSSE-LABEL: test_lrint_i64_f32:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: flds 8(%ebp)
+; X86-NOSSE-NEXT: fistpll (%esp)
+; X86-NOSSE-NEXT: movl (%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_lrint_i64_f32:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: flds (%esp)
+; X86-SSE2-NEXT: fistpll (%esp)
+; X86-SSE2-NEXT: movl (%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; SSE-LABEL: test_lrint_i64_f32:
+; SSE: # %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %rax
; SSE-NEXT: retq
;
-; AVX-LABEL: testmsxs:
-; AVX: # %bb.0: # %entry
+; AVX-LABEL: test_lrint_i64_f32:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtss2si %xmm0, %rax
; AVX-NEXT: retq
-entry:
- %0 = tail call i64 @llvm.lrint.i64.f32(float %x)
- ret i64 %0
+ %conv = tail call i64 @llvm.lrint.i64.f32(float %x)
+ ret i64 %conv
}
-define i64 @testmsxd(double %x) nounwind {
-; SSE-LABEL: testmsxd:
-; SSE: # %bb.0: # %entry
+define i64 @test_lrint_i64_f64(double %x) nounwind {
+; X86-NOSSE-LABEL: test_lrint_i64_f64:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: fldl 8(%ebp)
+; X86-NOSSE-NEXT: fistpll (%esp)
+; X86-NOSSE-NEXT: movl (%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_lrint_i64_f64:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: fldl (%esp)
+; X86-SSE2-NEXT: fistpll (%esp)
+; X86-SSE2-NEXT: movl (%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; SSE-LABEL: test_lrint_i64_f64:
+; SSE: # %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax
; SSE-NEXT: retq
;
-; AVX-LABEL: testmsxd:
-; AVX: # %bb.0: # %entry
+; AVX-LABEL: test_lrint_i64_f64:
+; AVX: # %bb.0:
; AVX-NEXT: vcvtsd2si %xmm0, %rax
; AVX-NEXT: retq
-entry:
- %0 = tail call i64 @llvm.lrint.i64.f64(double %x)
- ret i64 %0
+ %conv = tail call i64 @llvm.lrint.i64.f64(double %x)
+ ret i64 %conv
}
-define i64 @testmsll(x86_fp80 %x) nounwind {
-; CHECK-LABEL: testmsll:
-; CHECK: # %bb.0: # %entry
+define i64 @test_lrint_i64_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: test_lrint_i64_f80:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: fldt 8(%ebp)
+; X86-NEXT: fistpll (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; CHECK-LABEL: test_lrint_i64_f80:
+; CHECK: # %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fistpll -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq -{{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: retq
-entry:
- %0 = tail call i64 @llvm.lrint.i64.f80(x86_fp80 %x)
- ret i64 %0
+ %conv = tail call i64 @llvm.lrint.i64.f80(x86_fp80 %x)
+ ret i64 %conv
}
; FIXME(#44744): incorrect libcall
-define i64 @testmsxq(fp128 %x) nounwind {
-; CHECK-LABEL: testmsxq:
-; CHECK: # %bb.0: # %entry
+define i64 @test_lrint_i64_f128(fp128 %x) nounwind {
+; X86-LABEL: test_lrint_i64_f128:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: pushl 20(%ebp)
+; X86-NEXT: pushl 16(%ebp)
+; X86-NEXT: pushl 12(%ebp)
+; X86-NEXT: pushl 8(%ebp)
+; X86-NEXT: calll lrintl
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; CHECK-LABEL: test_lrint_i64_f128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: jmp lrintl@PLT # TAILCALL
-entry:
- %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
- ret i64 %0
+ %conv = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
+ ret i64 %conv
+}
+
+; FIXME: crash
+; define i64 @test_lrint_i64_f16_strict(half %x) nounwind {
+; %conv = tail call i64 @llvm.experimental.constrained.lrint.i64.f16(half %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i64 %conv
+; }
+
+define i64 @test_lrint_i64_f32_strict(float %x) nounwind {
+; X86-NOSSE-LABEL: test_lrint_i64_f32_strict:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %eax
+; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstps (%esp)
+; X86-NOSSE-NEXT: calll lrintf
+; X86-NOSSE-NEXT: popl %ecx
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_lrint_i64_f32_strict:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: calll lrintf
+; X86-SSE2-NEXT: popl %ecx
+; X86-SSE2-NEXT: retl
+;
+; CHECK-LABEL: test_lrint_i64_f32_strict:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq lrintf@PLT
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+ %conv = tail call i64 @llvm.experimental.constrained.lrint.i64.f32(float %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i64 %conv
+}
+
+define i64 @test_lrint_i64_f64_strict(double %x) nounwind {
+; X86-NOSSE-LABEL: test_lrint_i64_f64_strict:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl (%esp)
+; X86-NOSSE-NEXT: calll lrint
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_lrint_i64_f64_strict:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: calll lrint
+; X86-SSE2-NEXT: addl $8, %esp
+; X86-SSE2-NEXT: retl
+;
+; CHECK-LABEL: test_lrint_i64_f64_strict:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq lrint@PLT
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+ %conv = tail call i64 @llvm.experimental.constrained.lrint.i64.f64(double %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i64 %conv
+}
+
+define i64 @test_lrint_i64_f80_strict(x86_fp80 %x) nounwind {
+; X86-LABEL: test_lrint_i64_f80_strict:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: fldt {{[0-9]+}}(%esp)
+; X86-NEXT: fstpt (%esp)
+; X86-NEXT: calll lrintl
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; CHECK-LABEL: test_lrint_i64_f80_strict:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $24, %rsp
+; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT: fstpt (%rsp)
+; CHECK-NEXT: callq lrintl@PLT
+; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: retq
+ %conv = tail call i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i64 %conv
+}
+
+; FIXME(#44744): incorrect libcall
+define i64 @test_lrint_i64_f128_strict(fp128 %x) nounwind {
+; X86-LABEL: test_lrint_i64_f128_strict:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: pushl 20(%ebp)
+; X86-NEXT: pushl 16(%ebp)
+; X86-NEXT: pushl 12(%ebp)
+; X86-NEXT: pushl 8(%ebp)
+; X86-NEXT: calll lrintl
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; CHECK-LABEL: test_lrint_i64_f128_strict:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq lrintl@PLT
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+ %conv = tail call i64 @llvm.experimental.constrained.lrint.i64.f128(fp128 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+ ret i64 %conv
}
define i32 @PR125324(float %x) nounwind {
+; X86-NOSSE-LABEL: PR125324:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: flds 8(%ebp)
+; X86-NOSSE-NEXT: fistpll (%esp)
+; X86-NOSSE-NEXT: movl (%esp), %eax
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: PR125324:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: flds (%esp)
+; X86-SSE2-NEXT: fistpll (%esp)
+; X86-SSE2-NEXT: movl (%esp), %eax
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
; SSE-LABEL: PR125324:
-; SSE: # %bb.0: # %entry
+; SSE: # %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %rax
; SSE-NEXT: # kill: def $eax killed $eax killed $rax
; SSE-NEXT: retq
;
; AVX-LABEL: PR125324:
-; AVX: # %bb.0: # %entry
+; AVX: # %bb.0:
; AVX-NEXT: vcvtss2si %xmm0, %rax
; AVX-NEXT: # kill: def $eax killed $eax killed $rax
; AVX-NEXT: retq
-entry:
- %0 = tail call i64 @llvm.lrint.i64.f32(float %x)
- %1 = trunc i64 %0 to i32
- ret i32 %1
+ %conv = tail call i64 @llvm.lrint.i64.f32(float %x)
+ %trunc = trunc i64 %conv to i32
+ ret i32 %trunc
}
declare i64 @llvm.lrint.i64.f32(float) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/lround-conv-i32.ll b/llvm/test/CodeGen/X86/lround-conv-i32.ll
index c37536623143d..389f29233dcce 100644
--- a/llvm/test/CodeGen/X86/lround-conv-i32.ll
+++ b/llvm/test/CodeGen/X86/lround-conv-i32.ll
@@ -1,17 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s
-; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefixes=X86,X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefixes=X86,X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
-define i32 @testmsws(float %x) nounwind {
-; CHECK-LABEL: testmsws:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp lroundf # TAILCALL
+; FIXME: crash
+; define i32 @test_lround_i32_f16(half %x) nounwind {
+; %conv = tail call i32 @llvm.lround.i32.f16(half %x)
+; ret i32 %conv
+; }
+
+define i32 @test_lround_i32_f32(float %x) nounwind {
+; X86-LABEL: test_lround_i32_f32:
+; X86: # %bb.0:
+; X86-NEXT: jmp lroundf # TAILCALL
+;
+; X64-LABEL: test_lround_i32_f32:
+; X64: # %bb.0:
+; X64-NEXT: jmp lroundf@PLT # TAILCALL
;
-; GISEL-X86-LABEL: testmsws:
-; GISEL-X86: # %bb.0: # %entry
+; GISEL-X86-LABEL: test_lround_i32_f32:
+; GISEL-X86: # %bb.0:
; GISEL-X86-NEXT: subl $12, %esp
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; GISEL-X86-NEXT: movl %eax, (%esp)
@@ -19,28 +29,27 @@ define i32 @testmsws(float %x) nounwind {
; GISEL-X86-NEXT: addl $12, %esp
; GISEL-X86-NEXT: retl
;
-; X64-LABEL: testmsws:
-; X64: # %bb.0: # %entry
-; X64-NEXT: jmp lroundf@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsws:
-; GISEL-X64: # %bb.0: # %entry
+; GISEL-X64-LABEL: test_lround_i32_f32:
+; GISEL-X64: # %bb.0:
; GISEL-X64-NEXT: pushq %rax
; GISEL-X64-NEXT: callq lroundf
; GISEL-X64-NEXT: popq %rcx
; GISEL-X64-NEXT: retq
-entry:
- %0 = tail call i32 @llvm.lround.i32.f32(float %x)
- ret i32 %0
+ %conv = tail call i32 @llvm.lround.i32.f32(float %x)
+ ret i32 %conv
}
-define i32 @testmswd(double %x) nounwind {
-; CHECK-LABEL: testmswd:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp lround # TAILCALL
+define i32 @test_lround_i32_f64(double %x) nounwind {
+; X86-LABEL: test_lround_i32_f64:
+; X86: # %bb.0:
+; X86-NEXT: jmp lround # TAILCALL
+;
+; X64-LABEL: test_lround_i32_f64:
+; X64: # %bb.0:
+; X64-NEXT: jmp lround@PLT # TAILCALL
;
-; GISEL-X86-LABEL: testmswd:
-; GISEL-X86: # %bb.0: # %entry
+; GISEL-X86-LABEL: test_lround_i32_f64:
+; GISEL-X86: # %bb.0:
; GISEL-X86-NEXT: subl $12, %esp
; GISEL-X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -53,28 +62,27 @@ define i32 @testmswd(double %x) nounwind {
; GISEL-X86-NEXT: addl $12, %esp
; GISEL-X86-NEXT: retl
;
-; X64-LABEL: testmswd:
-; X64: # %bb.0: # %entry
-; X64-NEXT: jmp lround@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmswd:
-; GISEL-X64: # %bb.0: # %entry
+; GISEL-X64-LABEL: test_lround_i32_f64:
+; GISEL-X64: # %bb.0:
; GISEL-X64-NEXT: pushq %rax
; GISEL-X64-NEXT: callq lround
; GISEL-X64-NEXT: popq %rcx
; GISEL-X64-NEXT: retq
-entry:
- %0 = tail call i32 @llvm.lround.i32.f64(double %x)
- ret i32 %0
+ %conv = tail call i32 @llvm.lround.i32.f64(double %x)
+ ret i32 %conv
}
-define i32 @testmsll(x86_fp80 %x) nounwind {
-; CHECK-LABEL: testmsll:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp lroundl # TAILCALL
+define i32 @test_lround_i32_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: test_lround_i32_f80:
+; X86: # %bb.0:
+; X86-NEXT: jmp lroundl # TAILCALL
+;
+; X64-LABEL: test_lround_i32_f80:
+; X64: # %bb.0:
+; X64-NEXT: jmp lroundl@PLT # TAILCALL
;
-; GISEL-X86-LABEL: testmsll:
-; GISEL-X86: # %bb.0: # %entry
+; GISEL-X86-LABEL: test_lround_i32_f80:
+; GISEL-X86: # %bb.0:
; GISEL-X86-NEXT: subl $12, %esp
; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp)
; GISEL-X86-NEXT: fstpt (%esp)
@@ -82,19 +90,91 @@ define i32 @testmsll(x86_fp80 %x) nounwind {
; GISEL-X86-NEXT: addl $12, %esp
; GISEL-X86-NEXT: retl
;
-; X64-LABEL: testmsll:
-; X64: # %bb.0: # %entry
-; X64-NEXT: jmp lroundl@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsll:
-; GISEL-X64: # %bb.0: # %entry
+; GISEL-X64-LABEL: test_lround_i32_f80:
+; GISEL-X64: # %bb.0:
; GISEL-X64-NEXT: subq $24, %rsp
; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp)
; GISEL-X64-NEXT: fstpt (%rsp)
; GISEL-X64-NEXT: callq lroundl
; GISEL-X64-NEXT: addq $24, %rsp
; GISEL-X64-NEXT: retq
-entry:
- %0 = tail call i32 @llvm.lround.i32.f80(x86_fp80 %x)
- ret i32 %0
+ %conv = tail call i32 @llvm.lround.i32.f80(x86_fp80 %x)
+ ret i32 %conv
}
+
+define i32 @test_lround_i32_f128(fp128 %x) nounwind {
+; X86-LABEL: test_lround_i32_f128:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: pushl 20(%ebp)
+; X86-NEXT: pushl 16(%ebp)
+; X86-NEXT: pushl 12(%ebp)
+; X86-NEXT: pushl 8(%ebp)
+; X86-NEXT: calll lroundl
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: test_lround_i32_f128:
+; X64: # %bb.0:
+; X64-NEXT: jmp lroundl@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_lround_i32_f128:
+; GISEL-X86: # %bb.0:
+; GISEL-X86-NEXT: pushl %esi
+; GISEL-X86-NEXT: subl $24, %esp
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; GISEL-X86-NEXT: movl %eax, (%esp)
+; GISEL-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: calll lroundf128
+; GISEL-X86-NEXT: addl $24, %esp
+; GISEL-X86-NEXT: popl %esi
+; GISEL-X86-NEXT: retl
+;
+; GISEL-X64-LABEL: test_lround_i32_f128:
+; GISEL-X64: # %bb.0:
+; GISEL-X64-NEXT: pushq %rax
+; GISEL-X64-NEXT: callq lroundf128
+; GISEL-X64-NEXT: popq %rcx
+; GISEL-X64-NEXT: retq
+ %conv = tail call i32 @llvm.lround.i32.f128(fp128 %x)
+ ret i32 %conv
+}
+
+; FIXME: not yet implemented in global isel
+; define i32 @test_lround_i32_f16_strict(half %x) nounwind strictfp {
+; %conv = tail call i32 @llvm.experimental.constrained.lround.i32.f16(half %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i32 %conv
+; }
+
+; define i32 @test_lround_i32_f32_strict(float %x) nounwind strictfp {
+; %conv = tail call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i32 %conv
+; }
+
+; define i32 @test_lround_i32_f64_strict(double %x) nounwind strictfp {
+; %conv = tail call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i32 %conv
+; }
+
+; define i32 @test_lround_i32_f80_strict(x86_fp80 %x) nounwind strictfp {
+; %conv = tail call i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i32 %conv
+; }
+
+; define i32 @test_lround_i32_f128_strict(fp128 %x) nounwind strictfp {
+; %conv = tail call i32 @llvm.experimental.constrained.lround.i32.f128(fp128 %x, metadata!"round.dynamic", metadata!"fpexcept.strict")
+; ret i32 %conv
+; }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X86-NOSSE: {{.*}}
+; X86-SSE2: {{.*}}
diff --git a/llvm/test/CodeGen/X86/lround-conv-i64.ll b/llvm/test/CodeGen/X86/lround-conv-i64.ll
index 36b86f30ca133..8b8230074728f 100644
--- a/llvm/test/CodeGen/X86/lround-conv-i64.ll
+++ b/llvm/test/CodeGen/X86/lround-conv-i64.ll
@@ -1,42 +1,86 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefixes=X86,X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefixes=X86,X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefixes=X64
; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
-define i64 @testmsxs(float %x) {
-; GISEL-X86-LABEL: testmsxs:
+; FIXME: crash
+; define i64 @test_lround_i64_f16(half %x) nounwind {
+; entry:
+; %0 = tail call i64 @llvm.lround.i64.f16(half %x)
+; ret i64 %0
+; }
+
+define i64 @test_lround_i64_f32(float %x) nounwind {
+; X86-NOSSE-LABEL: test_lround_i64_f32:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: pushl %eax
+; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstps (%esp)
+; X86-NOSSE-NEXT: calll lroundf
+; X86-NOSSE-NEXT: popl %ecx
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_lround_i64_f32:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: calll lroundf
+; X86-SSE2-NEXT: popl %ecx
+; X86-SSE2-NEXT: retl
+;
+; X64-LABEL: test_lround_i64_f32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp lroundf@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_lround_i64_f32:
; GISEL-X86: # %bb.0: # %entry
; GISEL-X86-NEXT: subl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 16
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; GISEL-X86-NEXT: movl %eax, (%esp)
; GISEL-X86-NEXT: calll lroundf
; GISEL-X86-NEXT: addl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 4
; GISEL-X86-NEXT: retl
;
-; CHECK-LABEL: testmsxs:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp lroundf@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsxs:
+; GISEL-X64-LABEL: test_lround_i64_f32:
; GISEL-X64: # %bb.0: # %entry
; GISEL-X64-NEXT: pushq %rax
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 16
; GISEL-X64-NEXT: callq lroundf
; GISEL-X64-NEXT: popq %rcx
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 8
; GISEL-X64-NEXT: retq
entry:
%0 = tail call i64 @llvm.lround.i64.f32(float %x)
ret i64 %0
}
-define i64 @testmsxd(double %x) {
-; GISEL-X86-LABEL: testmsxd:
+define i64 @test_lround_i64_f64(double %x) nounwind {
+; X86-NOSSE-LABEL: test_lround_i64_f64:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl (%esp)
+; X86-NOSSE-NEXT: calll lround
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: test_lround_i64_f64:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: calll lround
+; X86-SSE2-NEXT: addl $8, %esp
+; X86-SSE2-NEXT: retl
+;
+; X64-LABEL: test_lround_i64_f64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp lround@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_lround_i64_f64:
; GISEL-X86: # %bb.0: # %entry
; GISEL-X86-NEXT: subl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 16
; GISEL-X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; GISEL-X86-NEXT: movl 4(%eax), %eax
@@ -46,53 +90,131 @@ define i64 @testmsxd(double %x) {
; GISEL-X86-NEXT: movl %eax, 4(%edx)
; GISEL-X86-NEXT: calll lround
; GISEL-X86-NEXT: addl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 4
; GISEL-X86-NEXT: retl
;
-; CHECK-LABEL: testmsxd:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp lround@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsxd:
+; GISEL-X64-LABEL: test_lround_i64_f64:
; GISEL-X64: # %bb.0: # %entry
; GISEL-X64-NEXT: pushq %rax
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 16
; GISEL-X64-NEXT: callq lround
; GISEL-X64-NEXT: popq %rcx
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 8
; GISEL-X64-NEXT: retq
entry:
%0 = tail call i64 @llvm.lround.i64.f64(double %x)
ret i64 %0
}
-define i64 @testmsll(x86_fp80 %x) {
-; GISEL-X86-LABEL: testmsll:
+define i64 @test_lround_i64_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: test_lround_i64_f80:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: fldt {{[0-9]+}}(%esp)
+; X86-NEXT: fstpt (%esp)
+; X86-NEXT: calll lroundl
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: test_lround_i64_f80:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp lroundl@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_lround_i64_f80:
; GISEL-X86: # %bb.0: # %entry
; GISEL-X86-NEXT: subl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 16
; GISEL-X86-NEXT: fldt {{[0-9]+}}(%esp)
; GISEL-X86-NEXT: fstpt (%esp)
; GISEL-X86-NEXT: calll lroundl
; GISEL-X86-NEXT: addl $12, %esp
-; GISEL-X86-NEXT: .cfi_def_cfa_offset 4
; GISEL-X86-NEXT: retl
;
-; CHECK-LABEL: testmsll:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp lroundl@PLT # TAILCALL
-;
-; GISEL-X64-LABEL: testmsll:
+; GISEL-X64-LABEL: test_lround_i64_f80:
; GISEL-X64: # %bb.0: # %entry
; GISEL-X64-NEXT: subq $24, %rsp
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 32
; GISEL-X64-NEXT: fldt {{[0-9]+}}(%rsp)
; GISEL-X64-NEXT: fstpt (%rsp)
; GISEL-X64-NEXT: callq lroundl
; GISEL-X64-NEXT: addq $24, %rsp
-; GISEL-X64-NEXT: .cfi_def_cfa_offset 8
; GISEL-X64-NEXT: retq
entry:
%0 = tail call i64 @llvm.lround.i64.f80(x86_fp80 %x)
ret i64 %0
}
+
+define i64 @test_lround_i64_f128(fp128 %x) nounwind {
+; X86-LABEL: test_lround_i64_f128:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: pushl 20(%ebp)
+; X86-NEXT: pushl 16(%ebp)
+; X86-NEXT: pushl 12(%ebp)
+; X86-NEXT: pushl 8(%ebp)
+; X86-NEXT: calll lroundl
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: test_lround_i64_f128:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp lroundl@PLT # TAILCALL
+;
+; GISEL-X86-LABEL: test_lround_i64_f128:
+; GISEL-X86: # %bb.0: # %entry
+; GISEL-X86-NEXT: pushl %esi
+; GISEL-X86-NEXT: subl $24, %esp
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; GISEL-X86-NEXT: movl %eax, (%esp)
+; GISEL-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT: calll lroundf128
+; GISEL-X86-NEXT: addl $24, %esp
+; GISEL-X86-NEXT: popl %esi
+; GISEL-X86-NEXT: retl
+;
+; GISEL-X64-LABEL: test_lround_i64_f128:
+; GISEL-X64: # %bb.0: # %entry
+; GISEL-X64-NEXT: pushq %rax
+; GISEL-X64-NEXT: callq lroundf128
+; GISEL-X64-NEXT: popq %rcx
+; GISEL-X64-NEXT: retq
+entry:
+ %0 = tail call i64 @llvm.lround.i64.f128(fp128 %x)
+ ret i64 %0
+}
+
+; FIXME: not yet implemented in global isel
+; define i64 @test_lround_i64_f16_strict(half %x) nounwind strictfp {
+; entry:
+; %0 = tail call i64 @llvm.experimental.constrained.lround.i64.f16(half %x, metadata !"round.dynamic", metadata !"fpexcept.strict")
+; ret i64 %0
+; }
+
+; define i64 @test_lround_i64_f32_strict(float %x) nounwind strictfp {
+; entry:
+; %0 = tail call i64 @llvm.experimental.constrained.lround.i64.f32(float %x, metadata !"round.dynamic", metadata !"fpexcept.strict")
+; ret i64 %0
+; }
+
+; define i64 @test_lround_i64_f64_strict(double %x) nounwind strictfp {
+; entry:
+; %0 = tail call i64 @llvm.experimental.constrained.lround.i64.f64(double %x, metadata !"round.dynamic", metadata !"fpexcept.strict")
+; ret i64 %0
+; }
+
+; define i64 @test_lround_i64_f80_strict(x86_fp80 %x) nounwind strictfp {
+; entry:
+; %0 = tail call i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict")
+; ret i64 %0
+; }
+
+; define i64 @test_lround_i64_f128_strict(fp128 %x) nounwind strictfp {
+; entry:
+; %0 = tail call i64 @llvm.experimental.constrained.lround.i64.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict")
+; ret i64 %0
+; }
More information about the llvm-commits
mailing list