[llvm] [Test] Add and update tests for `lrint`/`llrint` (NFC) (PR #152662)
Trevor Gross via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 8 04:27:10 PDT 2025
https://github.com/tgross35 updated https://github.com/llvm/llvm-project/pull/152662
From c0e510ce1f6a398c7f39f7b2e8b55cb54a7059aa Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Fri, 8 Aug 2025 03:15:39 -0500
Subject: [PATCH 1/2] [Test] Add and update tests for lrint
A number of backends are missing either all tests for lrint, or
specifically those for f16 which currently crashes for `softPromoteHalf`
targets. For a number of popular backends, do the following:
* Ensure f16, f32, f64, and f128 are all covered
* Ensure both a 32- and 64-bit target are tested, if relevant
* Add `nounwind` to clean up CFI output
* Add a test covering the above if one did not exist
---
llvm/test/CodeGen/ARM/llrint-conv.ll | 21 ++++
llvm/test/CodeGen/ARM/lrint-conv.ll | 18 +++
llvm/test/CodeGen/AVR/llrint.ll | 18 +++
llvm/test/CodeGen/AVR/lrint.ll | 18 +++
llvm/test/CodeGen/LoongArch/lrint-conv.ll | 96 +++++++++++++++
llvm/test/CodeGen/MSP430/lrint-conv.ll | 60 +++++++++
llvm/test/CodeGen/Mips/llrint-conv.ll | 15 +++
llvm/test/CodeGen/Mips/lrint-conv.ll | 15 +++
llvm/test/CodeGen/PowerPC/llrint-conv.ll | 32 +++++
llvm/test/CodeGen/PowerPC/lrint-conv.ll | 32 +++++
llvm/test/CodeGen/RISCV/lrint-conv.ll | 76 ++++++++++++
llvm/test/CodeGen/SPARC/lrint-conv.ll | 68 +++++++++++
llvm/test/CodeGen/WebAssembly/lrint-conv.ll | 62 ++++++++++
llvm/test/CodeGen/X86/llrint-conv.ll | 128 +++++++++++++++-----
llvm/test/CodeGen/X86/lrint-conv-i32.ll | 74 +++++++++--
llvm/test/CodeGen/X86/lrint-conv-i64.ll | 34 +++++-
16 files changed, 723 insertions(+), 44 deletions(-)
create mode 100644 llvm/test/CodeGen/LoongArch/lrint-conv.ll
create mode 100644 llvm/test/CodeGen/MSP430/lrint-conv.ll
create mode 100644 llvm/test/CodeGen/RISCV/lrint-conv.ll
create mode 100644 llvm/test/CodeGen/SPARC/lrint-conv.ll
create mode 100644 llvm/test/CodeGen/WebAssembly/lrint-conv.ll
diff --git a/llvm/test/CodeGen/ARM/llrint-conv.ll b/llvm/test/CodeGen/ARM/llrint-conv.ll
index 017955bb43afb..f0fb2e7543be6 100644
--- a/llvm/test/CodeGen/ARM/llrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/llrint-conv.ll
@@ -1,6 +1,16 @@
; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; SOFTFP-LABEL: testmsxh_builtin:
+; SOFTFP: bl llrintf
+; HARDFP-LABEL: testmsxh_builtin:
+; HARDFP: bl llrintf
+define i64 @testmsxh_builtin(half %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.f16(half %x)
+ ret i64 %0
+}
+
; SOFTFP-LABEL: testmsxs_builtin:
; SOFTFP: bl llrintf
; HARDFP-LABEL: testmsxs_builtin:
@@ -21,5 +31,16 @@ entry:
ret i64 %0
}
+; FIXME(#44744): incorrect libcall
+; SOFTFP-LABEL: testmsxq_builtin:
+; SOFTFP: bl llrintl
+; HARDFP-LABEL: testmsxq_builtin:
+; HARDFP: bl llrintl
+define i64 @testmsxq_builtin(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.f32(float) nounwind readnone
declare i64 @llvm.llrint.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/ARM/lrint-conv.ll b/llvm/test/CodeGen/ARM/lrint-conv.ll
index 192da565c12fd..9aa95112af533 100644
--- a/llvm/test/CodeGen/ARM/lrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/lrint-conv.ll
@@ -1,6 +1,13 @@
; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; FIXME: crash
+; define i32 @testmswh_builtin(half %x) {
+; entry:
+; %0 = tail call i32 @llvm.lrint.i32.f16(half %x)
+; ret i32 %0
+; }
+
; SOFTFP-LABEL: testmsws_builtin:
; SOFTFP: bl lrintf
; HARDFP-LABEL: testmsws_builtin:
@@ -21,5 +28,16 @@ entry:
ret i32 %0
}
+; FIXME(#44744): incorrect libcall
+; SOFTFP-LABEL: testmswq_builtin:
+; SOFTFP: bl lrintl
+; HARDFP-LABEL: testmswq_builtin:
+; HARDFP: bl lrintl
+define i32 @testmswq_builtin(fp128 %x) {
+entry:
+ %0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
+ ret i32 %0
+}
+
declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
declare i32 @llvm.lrint.i32.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/AVR/llrint.ll b/llvm/test/CodeGen/AVR/llrint.ll
index 32b4c7ab12a4b..c55664f2d7353 100644
--- a/llvm/test/CodeGen/AVR/llrint.ll
+++ b/llvm/test/CodeGen/AVR/llrint.ll
@@ -1,6 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=avr -mcpu=atmega328p | FileCheck %s
+; FIXME: crash "Input type needs to be promoted!"
+; define i64 @testmsxh_builtin(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; ret i64 %0
+; }
+
define i64 @testmsxs_builtin(float %x) {
; CHECK-LABEL: testmsxs_builtin:
; CHECK: ; %bb.0: ; %entry
@@ -21,5 +28,16 @@ entry:
ret i64 %0
}
+; FIXME(#44744): incorrect libcall
+define i64 @testmsxq_builtin(fp128 %x) {
+; CHECK-LABEL: testmsxq_builtin:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: call llrintl
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i64 @llvm.llrint.fp128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.f32(float) nounwind readnone
declare i64 @llvm.llrint.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/AVR/lrint.ll b/llvm/test/CodeGen/AVR/lrint.ll
index d7568305f7b51..4ef656060bd10 100644
--- a/llvm/test/CodeGen/AVR/lrint.ll
+++ b/llvm/test/CodeGen/AVR/lrint.ll
@@ -1,6 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=avr -mcpu=atmega328p | FileCheck %s
+; FIXME: crash "Input type needs to be promoted!"
+; define i32 @testmswh_builtin(half %x) {
+; entry:
+; %0 = tail call i32 @llvm.lrint.i32.f16(half %x)
+; ret i32 %0
+; }
+
define i32 @testmsws_builtin(float %x) {
; CHECK-LABEL: testmsws_builtin:
; CHECK: ; %bb.0: ; %entry
@@ -21,5 +28,16 @@ entry:
ret i32 %0
}
+; FIXME(#44744): incorrect libcall
+define i32 @testmswq_builtin(fp128 %x) {
+; CHECK-LABEL: testmswq_builtin:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: call lrint
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i32 @llvm.lrint.i32.fp128(fp128 %x)
+ ret i32 %0
+}
+
declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
declare i32 @llvm.lrint.i32.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/LoongArch/lrint-conv.ll b/llvm/test/CodeGen/LoongArch/lrint-conv.ll
new file mode 100644
index 0000000000000..85de820025614
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lrint-conv.ll
@@ -0,0 +1,96 @@
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=loongarch32 | FileCheck %s --check-prefixes=LA32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=loongarch32 | FileCheck %s --check-prefixes=LA32
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=loongarch64 | FileCheck %s --check-prefixes=LA64-I32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=loongarch64 | FileCheck %s --check-prefixes=LA64-I64
+
+; FIXME: crash
+; define ITy @test_lrint_ixx_f16(half %x) nounwind {
+; %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+; define ITy @test_llrint_ixx_f16(half %x) nounwind {
+; %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+define ITy @test_lrint_ixx_f32(float %x) nounwind {
+; LA32-LABEL: test_lrint_ixx_f32:
+; LA32: bl lrintf
+;
+; LA64-I32-LABEL: test_lrint_ixx_f32:
+; LA64-I32: pcaddu18i $ra, %call36(lrintf)
+;
+; LA64-I64-LABEL: test_lrint_ixx_f32:
+; LA64-I64: pcaddu18i $t8, %call36(lrintf)
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) nounwind {
+; LA32-LABEL: test_llrint_ixx_f32:
+; LA32: bl llrintf
+;
+; LA64-I32-LABEL: test_llrint_ixx_f32:
+; LA64-I32: pcaddu18i $ra, %call36(llrintf)
+;
+; LA64-I64-LABEL: test_llrint_ixx_f32:
+; LA64-I64: pcaddu18i $t8, %call36(llrintf)
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) nounwind {
+; LA32-LABEL: test_lrint_ixx_f64:
+; LA32: bl lrint
+;
+; LA64-I32-LABEL: test_lrint_ixx_f64:
+; LA64-I32: pcaddu18i $ra, %call36(lrint)
+;
+; LA64-I64-LABEL: test_lrint_ixx_f64:
+; LA64-I64: pcaddu18i $t8, %call36(lrint)
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) nounwind {
+; LA32-LABEL: test_llrint_ixx_f64:
+; LA32: bl llrint
+;
+; LA64-I32-LABEL: test_llrint_ixx_f64:
+; LA64-I32: pcaddu18i $ra, %call36(llrint)
+;
+; LA64-I64-LABEL: test_llrint_ixx_f64:
+; LA64-I64: pcaddu18i $t8, %call36(llrint)
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+; FIXME(#44744): incorrect libcall on loongarch32
+define ITy @test_lrint_ixx_f128(fp128 %x) nounwind {
+; LA32-LABEL: test_lrint_ixx_f128:
+; LA32: bl lrintl
+;
+; LA64-I32-LABEL: test_lrint_ixx_f128:
+; LA64-I32: pcaddu18i $ra, %call36(lrintl)
+;
+; LA64-I64-LABEL: test_lrint_ixx_f128:
+; LA64-I64: pcaddu18i $ra, %call36(lrintl)
+ %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f128(fp128 %x) nounwind {
+; LA32-LABEL: test_llrint_ixx_f128:
+; LA32: bl llrintl
+;
+; LA64-I32-LABEL: test_llrint_ixx_f128:
+; LA64-I32: pcaddu18i $ra, %call36(llrintl)
+;
+; LA64-I64-LABEL: test_llrint_ixx_f128:
+; LA64-I64: pcaddu18i $ra, %call36(llrintl)
+ %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
diff --git a/llvm/test/CodeGen/MSP430/lrint-conv.ll b/llvm/test/CodeGen/MSP430/lrint-conv.ll
new file mode 100644
index 0000000000000..04ab2af6102a0
--- /dev/null
+++ b/llvm/test/CodeGen/MSP430/lrint-conv.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=msp430-unknown-unknown | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=msp430-unknown-unknown | FileCheck %s --check-prefixes=CHECK
+
+; FIXME: crash "Input type needs to be promoted!"
+; define ITy @test_lrint_ixx_f16(half %x) nounwind {
+; %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+; define ITy @test_llrint_ixx_f16(half %x) nounwind {
+; %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+define ITy @test_lrint_ixx_f32(float %x) nounwind {
+; CHECK-LABEL: test_lrint_ixx_f32:
+; CHECK: call #lrintf
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) nounwind {
+; CHECK-LABEL: test_llrint_ixx_f32:
+; CHECK: call #llrintf
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) nounwind {
+; CHECK-LABEL: test_lrint_ixx_f64:
+; CHECK: call #lrint
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) nounwind {
+; CHECK-LABEL: test_llrint_ixx_f64:
+; CHECK: call #llrint
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+; FIXME(#44744): incorrect libcall
+define ITy @test_lrint_ixx_f128(fp128 %x) nounwind {
+; CHECK-LABEL: test_lrint_ixx_f128:
+; CHECK: call #lrintl
+ %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f128(fp128 %x) nounwind {
+; CHECK-LABEL: test_llrint_ixx_f128:
+; CHECK: call #llrintl
+ %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
diff --git a/llvm/test/CodeGen/Mips/llrint-conv.ll b/llvm/test/CodeGen/Mips/llrint-conv.ll
index dcb4e5657e80b..ee3c0d99253a6 100644
--- a/llvm/test/CodeGen/Mips/llrint-conv.ll
+++ b/llvm/test/CodeGen/Mips/llrint-conv.ll
@@ -1,4 +1,19 @@
; RUN: llc < %s -mtriple=mips64el -mattr=+soft-float | FileCheck %s
+; RUN: llc < %s -mtriple=mips -mattr=+soft-float | FileCheck %s
+
+; FIXME: crash
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; ret i64 %0
+; }
define signext i32 @testmsws(float %x) {
; CHECK-LABEL: testmsws:
diff --git a/llvm/test/CodeGen/Mips/lrint-conv.ll b/llvm/test/CodeGen/Mips/lrint-conv.ll
index bd3f7b3babe10..6d2e392675f1c 100644
--- a/llvm/test/CodeGen/Mips/lrint-conv.ll
+++ b/llvm/test/CodeGen/Mips/lrint-conv.ll
@@ -1,4 +1,19 @@
; RUN: llc < %s -mtriple=mips64el -mattr=+soft-float | FileCheck %s
+; RUN: llc < %s -mtriple=mips -mattr=+soft-float | FileCheck %s
+
+; FIXME: crash
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; ret i64 %0
+; }
define signext i32 @testmsws(float %x) {
; CHECK-LABEL: testmsws:
diff --git a/llvm/test/CodeGen/PowerPC/llrint-conv.ll b/llvm/test/CodeGen/PowerPC/llrint-conv.ll
index daadf85b4085a..dcd3bd25a83c5 100644
--- a/llvm/test/CodeGen/PowerPC/llrint-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/llrint-conv.ll
@@ -1,4 +1,19 @@
; RUN: llc < %s -mtriple=powerpc64le | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc | FileCheck %s
+
+; FIXME: crash "Input type needs to be promoted!"
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; ret i64 %0
+; }
; CHECK-LABEL: testmsws:
; CHECK: bl llrintf
@@ -51,6 +66,23 @@ entry:
ret i64 %0
}
+; CHECK-LABEL: testmswq:
+; CHECK: bl llrintf128
+define signext i32 @testmswq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %x)
+ %conv = trunc i64 %0 to i32
+ ret i32 %conv
+}
+
+; CHECK-LABEL: testmslq:
+; CHECK: bl llrintf128
+define i64 @testmslq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.f32(float) nounwind readnone
declare i64 @llvm.llrint.f64(double) nounwind readnone
declare i64 @llvm.llrint.ppcf128(ppc_fp128) nounwind readnone
diff --git a/llvm/test/CodeGen/PowerPC/lrint-conv.ll b/llvm/test/CodeGen/PowerPC/lrint-conv.ll
index adfc994497323..bc77a200757f4 100644
--- a/llvm/test/CodeGen/PowerPC/lrint-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/lrint-conv.ll
@@ -1,4 +1,19 @@
; RUN: llc < %s -mtriple=powerpc64le | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc | FileCheck %s
+
+; FIXME: crash "Input type needs to be promoted!"
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; ret i64 %0
+; }
; CHECK-LABEL: testmsws:
; CHECK: bl lrintf
@@ -51,6 +66,23 @@ entry:
ret i64 %0
}
+; CHECK-LABEL: testmswq:
+; CHECK: bl lrintf128
+define signext i32 @testmswq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
+ %conv = trunc i64 %0 to i32
+ ret i32 %conv
+}
+
+; CHECK-LABEL: testmslq:
+; CHECK: bl lrintf128
+define i64 @testmslq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.lrint.i64.f32(float) nounwind readnone
declare i64 @llvm.lrint.i64.f64(double) nounwind readnone
declare i64 @llvm.lrint.i64.ppcf128(ppc_fp128) nounwind readnone
diff --git a/llvm/test/CodeGen/RISCV/lrint-conv.ll b/llvm/test/CodeGen/RISCV/lrint-conv.ll
new file mode 100644
index 0000000000000..d3af2153588a1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/lrint-conv.ll
@@ -0,0 +1,76 @@
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=riscv32 | FileCheck %s --check-prefixes=RV32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=riscv32 | FileCheck %s --check-prefixes=RV32
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=riscv64 | FileCheck %s --check-prefixes=RV64
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=riscv64 | FileCheck %s --check-prefixes=RV64
+
+; FIXME: crash
+; define ITy @test_lrint_ixx_f16(half %x) nounwind {
+; %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+; }
+
+; define ITy @test_llrint_ixx_f16(half %x) nounwind {
+; %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+; }
+
+define ITy @test_lrint_ixx_f32(float %x) nounwind {
+; RV32-LABEL: test_lrint_ixx_f32:
+; RV32: call lrintf
+;
+; RV64-LABEL: test_lrint_ixx_f32:
+; RV64: call lrintf
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) nounwind {
+; RV32-LABEL: test_llrint_ixx_f32:
+; RV32: call llrintf
+;
+; RV64-LABEL: test_llrint_ixx_f32:
+; RV64: call llrintf
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) nounwind {
+; RV32-LABEL: test_lrint_ixx_f64:
+; RV32: call lrint
+;
+; RV64-LABEL: test_lrint_ixx_f64:
+; RV64: call lrint
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) nounwind {
+; RV32-LABEL: test_llrint_ixx_f64:
+; RV32: call llrint
+;
+; RV64-LABEL: test_llrint_ixx_f64:
+; RV64: call llrint
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+; FIXME(#44744): incorrect libcall on riscv32
+define ITy @test_lrint_ixx_f128(fp128 %x) nounwind {
+; RV32-LABEL: test_lrint_ixx_f128:
+; RV32: call lrintl
+;
+; RV64-LABEL: test_lrint_ixx_f128:
+; RV64: call lrintl
+ %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f128(fp128 %x) nounwind {
+; RV32-LABEL: test_llrint_ixx_f128:
+; RV32: call llrintl
+;
+; RV64-LABEL: test_llrint_ixx_f128:
+; RV64: call llrintl
+ %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
diff --git a/llvm/test/CodeGen/SPARC/lrint-conv.ll b/llvm/test/CodeGen/SPARC/lrint-conv.ll
new file mode 100644
index 0000000000000..81934114f548f
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/lrint-conv.ll
@@ -0,0 +1,68 @@
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=sparc | FileCheck %s --check-prefixes=SPARC32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=sparc | FileCheck %s --check-prefixes=SPARC32
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=sparc64 | FileCheck %s --check-prefixes=SPARC64
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=sparc64 | FileCheck %s --check-prefixes=SPARC64
+
+; FIXME: crash "Input type needs to be promoted!"
+; define ITy @test_lrint_ixx_f16(half %x) nounwind {
+; %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+; define ITy @test_llrint_ixx_f16(half %x) nounwind {
+; %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+define ITy @test_lrint_ixx_f32(float %x) nounwind {
+; SPARC32-LABEL: test_lrint_ixx_f32:
+; SPARC32: call lrintf
+;
+; SPARC64-LABEL: test_lrint_ixx_f32:
+; SPARC64: call lrintf
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) nounwind {
+; SPARC32-LABEL: test_llrint_ixx_f32:
+; SPARC32: call llrintf
+;
+; SPARC64-LABEL: test_llrint_ixx_f32:
+; SPARC64: call llrintf
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) nounwind {
+; SPARC32-LABEL: test_lrint_ixx_f64:
+; SPARC32: call lrint
+;
+; SPARC64-LABEL: test_lrint_ixx_f64:
+; SPARC64: call lrint
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) nounwind {
+; SPARC32-LABEL: test_llrint_ixx_f64:
+; SPARC32: call llrint
+;
+; SPARC64-LABEL: test_llrint_ixx_f64:
+; SPARC64: call llrint
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+; FIXME(#41838): unsupported type
+; define ITy @test_lrint_ixx_f128(fp128 %x) nounwind {
+; %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+; ret ITy %res
+; }
+
+; define ITy @test_llrint_ixx_f128(fp128 %x) nounwind {
+; %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+; ret ITy %res
+; }
diff --git a/llvm/test/CodeGen/WebAssembly/lrint-conv.ll b/llvm/test/CodeGen/WebAssembly/lrint-conv.ll
new file mode 100644
index 0000000000000..0571150cb3505
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/lrint-conv.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=wasm32-unknown-unknown | FileCheck %s
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=wasm32-unknown-unknown | FileCheck %s
+
+define ITy @test_lrint_ixx_f16(half %x) nounwind {
+; CHECK-LABEL: test_lrint_ixx_f16:
+; CHECK: call lrintf
+ %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f16(half %x) nounwind {
+; CHECK-LABEL: test_llrint_ixx_f16:
+; CHECK: call llrintf
+ %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f32(float %x) nounwind {
+; CHECK-LABEL: test_lrint_ixx_f32:
+; CHECK: call lrintf
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) nounwind {
+; CHECK-LABEL: test_llrint_ixx_f32:
+; CHECK: call llrintf
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) nounwind {
+; CHECK-LABEL: test_lrint_ixx_f64:
+; CHECK: call lrint
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) nounwind {
+; CHECK-LABEL: test_llrint_ixx_f64:
+; CHECK: call llrint
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f128(fp128 %x) nounwind {
+; CHECK-LABEL: test_lrint_ixx_f128:
+; CHECK: call lrintl
+ %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f128(fp128 %x) nounwind {
+; CHECK-LABEL: test_llrint_ixx_f128:
+; CHECK: call llrintl
+ %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
diff --git a/llvm/test/CodeGen/X86/llrint-conv.ll b/llvm/test/CodeGen/X86/llrint-conv.ll
index 402daf80a15e8..d3eca5197a94b 100644
--- a/llvm/test/CodeGen/X86/llrint-conv.ll
+++ b/llvm/test/CodeGen/X86/llrint-conv.ll
@@ -7,14 +7,50 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=X64,X64-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=X64,X64-AVX
-define i64 @testmsxs(float %x) {
+define i64 @testmsxh(half %x) nounwind {
+; X86-NOSSE-LABEL: testmsxh:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: pushl %eax
+; X86-NOSSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: calll __extendhfsf2
+; X86-NOSSE-NEXT: fstps (%esp)
+; X86-NOSSE-NEXT: calll llrintf
+; X86-NOSSE-NEXT: popl %ecx
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: testmsxh:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-SSE2-NEXT: pextrw $0, %xmm0, %eax
+; X86-SSE2-NEXT: movw %ax, (%esp)
+; X86-SSE2-NEXT: calll __extendhfsf2
+; X86-SSE2-NEXT: fstps (%esp)
+; X86-SSE2-NEXT: calll llrintf
+; X86-SSE2-NEXT: popl %ecx
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE-LABEL: testmsxh:
+; X64-SSE: # %bb.0: # %entry
+; X64-SSE-NEXT: pushq %rax
+; X64-SSE-NEXT: callq __extendhfsf2@PLT
+; X64-SSE-NEXT: callq rintf@PLT
+; X64-SSE-NEXT: callq __truncsfhf2@PLT
+; X64-SSE-NEXT: callq __extendhfsf2@PLT
+; X64-SSE-NEXT: cvttss2si %xmm0, %rax
+; X64-SSE-NEXT: popq %rcx
+; X64-SSE-NEXT: retq
+entry:
+ %0 = tail call i64 @llvm.llrint.f16(half %x)
+ ret i64 %0
+}
+
+define i64 @testmsxs(float %x) nounwind {
; X86-NOSSE-LABEL: testmsxs:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT: .cfi_offset %ebp, -8
; X86-NOSSE-NEXT: movl %esp, %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa_register %ebp
; X86-NOSSE-NEXT: andl $-8, %esp
; X86-NOSSE-NEXT: subl $8, %esp
; X86-NOSSE-NEXT: flds 8(%ebp)
@@ -23,16 +59,12 @@ define i64 @testmsxs(float %x) {
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOSSE-NEXT: movl %ebp, %esp
; X86-NOSSE-NEXT: popl %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa %esp, 4
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: testmsxs:
; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: pushl %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
-; X86-SSE2-NEXT: .cfi_offset %ebp, -8
; X86-SSE2-NEXT: movl %esp, %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa_register %ebp
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -43,16 +75,12 @@ define i64 @testmsxs(float %x) {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE2-NEXT: movl %ebp, %esp
; X86-SSE2-NEXT: popl %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa %esp, 4
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: testmsxs:
; X86-AVX: # %bb.0: # %entry
; X86-AVX-NEXT: pushl %ebp
-; X86-AVX-NEXT: .cfi_def_cfa_offset 8
-; X86-AVX-NEXT: .cfi_offset %ebp, -8
; X86-AVX-NEXT: movl %esp, %ebp
-; X86-AVX-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -63,7 +91,6 @@ define i64 @testmsxs(float %x) {
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl %ebp, %esp
; X86-AVX-NEXT: popl %ebp
-; X86-AVX-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: testmsxs:
@@ -80,14 +107,11 @@ entry:
ret i64 %0
}
-define i64 @testmsxd(double %x) {
+define i64 @testmsxd(double %x) nounwind {
; X86-NOSSE-LABEL: testmsxd:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT: .cfi_offset %ebp, -8
; X86-NOSSE-NEXT: movl %esp, %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa_register %ebp
; X86-NOSSE-NEXT: andl $-8, %esp
; X86-NOSSE-NEXT: subl $8, %esp
; X86-NOSSE-NEXT: fldl 8(%ebp)
@@ -96,16 +120,12 @@ define i64 @testmsxd(double %x) {
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOSSE-NEXT: movl %ebp, %esp
; X86-NOSSE-NEXT: popl %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa %esp, 4
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: testmsxd:
; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: pushl %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
-; X86-SSE2-NEXT: .cfi_offset %ebp, -8
; X86-SSE2-NEXT: movl %esp, %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa_register %ebp
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -116,16 +136,12 @@ define i64 @testmsxd(double %x) {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE2-NEXT: movl %ebp, %esp
; X86-SSE2-NEXT: popl %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa %esp, 4
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: testmsxd:
; X86-AVX: # %bb.0: # %entry
; X86-AVX-NEXT: pushl %ebp
-; X86-AVX-NEXT: .cfi_def_cfa_offset 8
-; X86-AVX-NEXT: .cfi_offset %ebp, -8
; X86-AVX-NEXT: movl %esp, %ebp
-; X86-AVX-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -136,7 +152,6 @@ define i64 @testmsxd(double %x) {
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl %ebp, %esp
; X86-AVX-NEXT: popl %ebp
-; X86-AVX-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: testmsxd:
@@ -153,14 +168,11 @@ entry:
ret i64 %0
}
-define i64 @testmsll(x86_fp80 %x) {
+define i64 @testmsll(x86_fp80 %x) nounwind {
; X86-LABEL: testmsll:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: .cfi_offset %ebp, -8
; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: .cfi_def_cfa_register %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: fldt 8(%ebp)
@@ -169,7 +181,6 @@ define i64 @testmsll(x86_fp80 %x) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
-; X86-NEXT: .cfi_def_cfa %esp, 4
; X86-NEXT: retl
;
; X64-LABEL: testmsll:
@@ -183,6 +194,61 @@ entry:
ret i64 %0
}
+; FIXME(#44744): incorrect libcall
+define i64 @testmslq(fp128 %x) nounwind {
+; X86-NOSSE-LABEL: testmslq:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-16, %esp
+; X86-NOSSE-NEXT: subl $16, %esp
+; X86-NOSSE-NEXT: pushl 20(%ebp)
+; X86-NOSSE-NEXT: pushl 16(%ebp)
+; X86-NOSSE-NEXT: pushl 12(%ebp)
+; X86-NOSSE-NEXT: pushl 8(%ebp)
+; X86-NOSSE-NEXT: calll llrintl
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: testmslq:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-16, %esp
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: pushl 20(%ebp)
+; X86-SSE2-NEXT: pushl 16(%ebp)
+; X86-SSE2-NEXT: pushl 12(%ebp)
+; X86-SSE2-NEXT: pushl 8(%ebp)
+; X86-SSE2-NEXT: calll llrintl
+; X86-SSE2-NEXT: addl $16, %esp
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: testmslq:
+; X86-AVX: # %bb.0: # %entry
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-16, %esp
+; X86-AVX-NEXT: subl $32, %esp
+; X86-AVX-NEXT: vmovups 8(%ebp), %xmm0
+; X86-AVX-NEXT: vmovups %xmm0, (%esp)
+; X86-AVX-NEXT: calll llrintl
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: testmslq:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp llrintl@PLT # TAILCALL
+entry:
+ %0 = tail call i64 @llvm.llrint.fp128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.f32(float) nounwind readnone
declare i64 @llvm.llrint.f64(double) nounwind readnone
declare i64 @llvm.llrint.f80(x86_fp80) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/lrint-conv-i32.ll b/llvm/test/CodeGen/X86/lrint-conv-i32.ll
index 21580f53ec9b3..3c50aea1095f4 100644
--- a/llvm/test/CodeGen/X86/lrint-conv-i32.ll
+++ b/llvm/test/CodeGen/X86/lrint-conv-i32.ll
@@ -7,16 +7,21 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=X64,X64-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=X64,X64-AVX
-define i32 @testmsws(float %x) {
+; FIXME: crash
+; define i32 @testmswh(half %x) nounwind {
+; entry:
+; %0 = tail call i32 @llvm.lrint.i32.f16(half %x)
+; ret i32 %0
+; }
+
+define i32 @testmsws(float %x) nounwind {
; X86-NOSSE-LABEL: testmsws:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %eax
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fistpl (%esp)
; X86-NOSSE-NEXT: movl (%esp), %eax
; X86-NOSSE-NEXT: popl %ecx
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 4
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: testmsws:
@@ -43,16 +48,14 @@ entry:
ret i32 %0
}
-define i32 @testmswd(double %x) {
+define i32 @testmswd(double %x) nounwind {
; X86-NOSSE-LABEL: testmswd:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %eax
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fistpl (%esp)
; X86-NOSSE-NEXT: movl (%esp), %eax
; X86-NOSSE-NEXT: popl %ecx
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 4
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: testmswd:
@@ -79,16 +82,14 @@ entry:
ret i32 %0
}
-define i32 @testmsll(x86_fp80 %x) {
+define i32 @testmsll(x86_fp80 %x) nounwind {
; X86-LABEL: testmsll:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %eax
-; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fistpl (%esp)
; X86-NEXT: movl (%esp), %eax
; X86-NEXT: popl %ecx
-; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
;
; X64-LABEL: testmsll:
@@ -102,6 +103,61 @@ entry:
ret i32 %0
}
+; FIXME(#44744): incorrect libcall
+define i32 @testmswq(fp128 %x) nounwind {
+; X86-NOSSE-LABEL: testmswq:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-16, %esp
+; X86-NOSSE-NEXT: subl $16, %esp
+; X86-NOSSE-NEXT: pushl 20(%ebp)
+; X86-NOSSE-NEXT: pushl 16(%ebp)
+; X86-NOSSE-NEXT: pushl 12(%ebp)
+; X86-NOSSE-NEXT: pushl 8(%ebp)
+; X86-NOSSE-NEXT: calll lrintl
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: testmswq:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-16, %esp
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: pushl 20(%ebp)
+; X86-SSE2-NEXT: pushl 16(%ebp)
+; X86-SSE2-NEXT: pushl 12(%ebp)
+; X86-SSE2-NEXT: pushl 8(%ebp)
+; X86-SSE2-NEXT: calll lrintl
+; X86-SSE2-NEXT: addl $16, %esp
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: testmswq:
+; X86-AVX: # %bb.0: # %entry
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-16, %esp
+; X86-AVX-NEXT: subl $32, %esp
+; X86-AVX-NEXT: vmovups 8(%ebp), %xmm0
+; X86-AVX-NEXT: vmovups %xmm0, (%esp)
+; X86-AVX-NEXT: calll lrintl
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: testmswq:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp lrintl@PLT # TAILCALL
+entry:
+ %0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
+ ret i32 %0
+}
+
declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
declare i32 @llvm.lrint.i32.f64(double) nounwind readnone
declare i32 @llvm.lrint.i32.f80(x86_fp80) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/lrint-conv-i64.ll b/llvm/test/CodeGen/X86/lrint-conv-i64.ll
index 38fa09085e189..2ba1500df0b6e 100644
--- a/llvm/test/CodeGen/X86/lrint-conv-i64.ll
+++ b/llvm/test/CodeGen/X86/lrint-conv-i64.ll
@@ -3,7 +3,23 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=CHECK,AVX
-define i64 @testmsxs(float %x) {
+define i64 @testmsxh(half %x) nounwind {
+; SSE-LABEL: testmsxh:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushq %rax
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: callq rintf@PLT
+; SSE-NEXT: callq __truncsfhf2@PLT
+; SSE-NEXT: callq __extendhfsf2@PLT
+; SSE-NEXT: cvttss2si %xmm0, %rax
+; SSE-NEXT: popq %rcx
+; SSE-NEXT: retq
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+ ret i64 %0
+}
+
+define i64 @testmsxs(float %x) nounwind {
; SSE-LABEL: testmsxs:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtss2si %xmm0, %rax
@@ -18,7 +34,7 @@ entry:
ret i64 %0
}
-define i64 @testmsxd(double %x) {
+define i64 @testmsxd(double %x) nounwind {
; SSE-LABEL: testmsxd:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtsd2si %xmm0, %rax
@@ -33,7 +49,7 @@ entry:
ret i64 %0
}
-define i64 @testmsll(x86_fp80 %x) {
+define i64 @testmsll(x86_fp80 %x) nounwind {
; CHECK-LABEL: testmsll:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
@@ -45,7 +61,17 @@ entry:
ret i64 %0
}
-define i32 @PR125324(float %x) {
+; FIXME(#44744): incorrect libcall
+define i64 @testmsxq(fp128 %x) nounwind {
+; CHECK-LABEL: testmsxq:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: jmp lrintl@PLT # TAILCALL
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
+ ret i64 %0
+}
+
+define i32 @PR125324(float %x) nounwind {
; SSE-LABEL: PR125324:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtss2si %xmm0, %rax
>From 6ab0fe9c09e317f6c0c8cf80c37c7ca93c3196fb Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Fri, 8 Aug 2025 06:26:56 -0500
Subject: [PATCH 2/2] Update existing vector tests
---
llvm/test/CodeGen/PowerPC/vector-llrint.ll | 4 +
llvm/test/CodeGen/PowerPC/vector-lrint.ll | 8 +
llvm/test/CodeGen/X86/vector-llrint-f16.ll | 3 +
llvm/test/CodeGen/X86/vector-llrint.ll | 573 ++++++++++++++++++
llvm/test/CodeGen/X86/vector-lrint.ll | 650 +++++++++++++++++++++
5 files changed, 1238 insertions(+)
diff --git a/llvm/test/CodeGen/PowerPC/vector-llrint.ll b/llvm/test/CodeGen/PowerPC/vector-llrint.ll
index 9229fefced67e..7085cf51916da 100644
--- a/llvm/test/CodeGen/PowerPC/vector-llrint.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-llrint.ll
@@ -1,4 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; FIXME: crash "Input type needs to be promoted!"
+; SKIP: llc -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; SKIP: -mtriple=powerpc-unknown-unknown -verify-machineinstrs < %s | \
+; SKIP: FileCheck %s --check-prefix=PPC32
; RUN: llc -mcpu=pwr7 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: -mtriple=powerpc64-unknown-unknown -verify-machineinstrs < %s | \
; RUN: FileCheck %s --check-prefix=BE
diff --git a/llvm/test/CodeGen/PowerPC/vector-lrint.ll b/llvm/test/CodeGen/PowerPC/vector-lrint.ll
index c2576d4631db8..b2ade5300dbc3 100644
--- a/llvm/test/CodeGen/PowerPC/vector-lrint.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-lrint.ll
@@ -1,4 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; FIXME: crash "Input type needs to be promoted!"
+; SKIP: sed 's/iXLen/i32/g' %s | llc -ppc-asm-full-reg-names \
+; SKIP: -ppc-vsr-nums-as-vr -mtriple=powerpc-unknown-unknown \
+; SKIP: -verify-machineinstrs | FileCheck %s --check-prefixes=PPC32
; RUN: sed 's/iXLen/i32/g' %s | llc -mcpu=pwr7 -ppc-asm-full-reg-names \
; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64-unknown-unknown \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=BE
@@ -9,6 +13,10 @@
; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64le-unknown-unknown \
; RUN: -verify-machineinstrs --enable-unsafe-fp-math | \
; RUN: FileCheck %s --check-prefixes=FAST
+; FIXME: crash "Input type needs to be promoted!"
+; SKIP: sed 's/iXLen/i64/g' %s | llc -ppc-asm-full-reg-names \
+; SKIP: -ppc-vsr-nums-as-vr -mtriple=powerpc-unknown-unknown \
+; SKIP: -verify-machineinstrs | FileCheck %s --check-prefixes=PPC32
; RUN: sed 's/iXLen/i64/g' %s | llc -mcpu=pwr7 -ppc-asm-full-reg-names \
; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64-unknown-unknown \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=BE
diff --git a/llvm/test/CodeGen/X86/vector-llrint-f16.ll b/llvm/test/CodeGen/X86/vector-llrint-f16.ll
index 5e5c5849fc22e..d6a21e1c00502 100644
--- a/llvm/test/CodeGen/X86/vector-llrint-f16.ll
+++ b/llvm/test/CodeGen/X86/vector-llrint-f16.ll
@@ -1,4 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; FIXME: crash "Do not know how to split the result of this operator!"
+; SKIP: sed 's/XRINT/lrint/g' %s | llc -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
+; SKIP: sed 's/XRINT/llrint/g' %s | llc -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
; RUN: sed 's/XRINT/lrint/g' %s | llc -mtriple=x86_64-unknown -mattr=avx2,f16c | FileCheck %s --check-prefix=AVX
; RUN: sed 's/XRINT/llrint/g' %s | llc -mtriple=x86_64-unknown -mattr=avx2,f16c | FileCheck %s --check-prefix=AVX
; RUN: sed 's/XRINT/lrint/g' %s | llc -mtriple=x86_64-unknown -mattr=avx512fp16,avx512vl | FileCheck %s --check-prefix=FP16
diff --git a/llvm/test/CodeGen/X86/vector-llrint.ll b/llvm/test/CodeGen/X86/vector-llrint.ll
index 7017eb60df41d..08ee748497650 100644
--- a/llvm/test/CodeGen/X86/vector-llrint.ll
+++ b/llvm/test/CodeGen/X86/vector-llrint.ll
@@ -1,10 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=AVX,AVX512
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512dq,avx512vl | FileCheck %s --check-prefixes=AVX512DQ
define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
+; X86-LABEL: llrint_v1i64_v1f32:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: flds 8(%ebp)
+; X86-NEXT: fistpll (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl
+;
; SSE-LABEL: llrint_v1i64_v1f32:
; SSE: # %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %rax
@@ -25,6 +44,39 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
+; X86-LABEL: llrint_v2i64_v2f32:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: .cfi_offset %esi, -16
+; X86-NEXT: .cfi_offset %edi, -12
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: flds 16(%ebp)
+; X86-NEXT: flds 12(%ebp)
+; X86-NEXT: fistpll (%esp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: movl (%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 8(%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: leal -8(%ebp), %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl $4
+;
; SSE-LABEL: llrint_v2i64_v2f32:
; SSE: # %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %rax
@@ -56,6 +108,60 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
+; X86-LABEL: llrint_v4i64_v4f32:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $56, %esp
+; X86-NEXT: .cfi_offset %esi, -20
+; X86-NEXT: .cfi_offset %edi, -16
+; X86-NEXT: .cfi_offset %ebx, -12
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: flds 24(%ebp)
+; X86-NEXT: flds 20(%ebp)
+; X86-NEXT: flds 16(%ebp)
+; X86-NEXT: flds 12(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, 28(%eax)
+; X86-NEXT: movl %ecx, 24(%eax)
+; X86-NEXT: movl %edx, 20(%eax)
+; X86-NEXT: movl %ebx, 16(%eax)
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl $4
+;
; SSE-LABEL: llrint_v4i64_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: cvtss2si %xmm0, %rax
@@ -122,6 +228,100 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
+; X86-LABEL: llrint_v8i64_v8f32:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $120, %esp
+; X86-NEXT: .cfi_offset %esi, -20
+; X86-NEXT: .cfi_offset %edi, -16
+; X86-NEXT: .cfi_offset %ebx, -12
+; X86-NEXT: flds 12(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 16(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 20(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 24(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 28(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 32(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 36(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 40(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl %ebx, 60(%eax)
+; X86-NEXT: movl %ecx, 56(%eax)
+; X86-NEXT: movl %edx, 52(%eax)
+; X86-NEXT: movl %esi, 48(%eax)
+; X86-NEXT: movl %edi, 44(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 40(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 36(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 32(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 28(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 24(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 20(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl $4
+;
; SSE-LABEL: llrint_v8i64_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm2
@@ -236,6 +436,180 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
+; X86-LABEL: llrint_v16i64_v16f32:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $248, %esp
+; X86-NEXT: .cfi_offset %esi, -20
+; X86-NEXT: .cfi_offset %edi, -16
+; X86-NEXT: .cfi_offset %ebx, -12
+; X86-NEXT: flds 12(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 16(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 20(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 24(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 28(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 32(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 36(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 40(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 44(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 48(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 52(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 56(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 60(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 64(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 68(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: flds 72(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl %ebx, 124(%eax)
+; X86-NEXT: movl %ecx, 120(%eax)
+; X86-NEXT: movl %edx, 116(%eax)
+; X86-NEXT: movl %esi, 112(%eax)
+; X86-NEXT: movl %edi, 108(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 104(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 100(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 96(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 92(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 88(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 84(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 80(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 76(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 72(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 68(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 64(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 60(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 56(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 52(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 48(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 44(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 40(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 36(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 32(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 28(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 24(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 20(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl $4
+;
; SSE-LABEL: llrint_v16i64_v16f32:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %rax
@@ -452,6 +826,24 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
+; X86-LABEL: llrint_v1i64_v1f64:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: fldl 8(%ebp)
+; X86-NEXT: fistpll (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl
+;
; SSE-LABEL: llrint_v1i64_v1f64:
; SSE: # %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax
@@ -472,6 +864,39 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
+; X86-LABEL: llrint_v2i64_v2f64:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: .cfi_offset %esi, -16
+; X86-NEXT: .cfi_offset %edi, -12
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: fldl 20(%ebp)
+; X86-NEXT: fldl 12(%ebp)
+; X86-NEXT: fistpll (%esp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: movl (%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 8(%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: leal -8(%ebp), %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl $4
+;
; SSE-LABEL: llrint_v2i64_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax
@@ -503,6 +928,60 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
+; X86-LABEL: llrint_v4i64_v4f64:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $56, %esp
+; X86-NEXT: .cfi_offset %esi, -20
+; X86-NEXT: .cfi_offset %edi, -16
+; X86-NEXT: .cfi_offset %ebx, -12
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: fldl 36(%ebp)
+; X86-NEXT: fldl 28(%ebp)
+; X86-NEXT: fldl 20(%ebp)
+; X86-NEXT: fldl 12(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, 28(%eax)
+; X86-NEXT: movl %ecx, 24(%eax)
+; X86-NEXT: movl %edx, 20(%eax)
+; X86-NEXT: movl %ebx, 16(%eax)
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl $4
+;
; SSE-LABEL: llrint_v4i64_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax
@@ -567,6 +1046,100 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
+; X86-LABEL: llrint_v8i64_v8f64:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: .cfi_def_cfa_register %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $120, %esp
+; X86-NEXT: .cfi_offset %esi, -20
+; X86-NEXT: .cfi_offset %edi, -16
+; X86-NEXT: .cfi_offset %ebx, -12
+; X86-NEXT: fldl 12(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fldl 20(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fldl 28(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fldl 36(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fldl 44(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fldl 52(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fldl 60(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: fldl 68(%ebp)
+; X86-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl %ebx, 60(%eax)
+; X86-NEXT: movl %ecx, 56(%eax)
+; X86-NEXT: movl %edx, 52(%eax)
+; X86-NEXT: movl %esi, 48(%eax)
+; X86-NEXT: movl %edi, 44(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 40(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 36(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 32(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 28(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 24(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 20(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: .cfi_def_cfa %esp, 4
+; X86-NEXT: retl $4
+;
; SSE-LABEL: llrint_v8i64_v8f64:
; SSE: # %bb.0:
; SSE-NEXT: cvtsd2si %xmm0, %rax
diff --git a/llvm/test/CodeGen/X86/vector-lrint.ll b/llvm/test/CodeGen/X86/vector-lrint.ll
index b1c8d46f497f3..a4c50e539d661 100644
--- a/llvm/test/CodeGen/X86/vector-lrint.ll
+++ b/llvm/test/CodeGen/X86/vector-lrint.ll
@@ -1,4 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=i686-unknown | FileCheck %s --check-prefix=X86-I32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=i686-unknown | FileCheck %s --check-prefix=X86-I64
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefix=X86-SSE2
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=i686-unknown -mattr=avx | FileCheck %s --check-prefixes=X86-AVX,X86-AVX1
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=i686-unknown -mattr=avx512f | FileCheck %s --check-prefixes=X86-AVX,AVX512-i32
@@ -11,6 +13,35 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=x86_64-unknown -mattr=avx512dq,avx512vl | FileCheck %s --check-prefixes=X64-AVX-i64,AVX512DQ-i64
define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
+; X86-I32-LABEL: lrint_v1f32:
+; X86-I32: # %bb.0:
+; X86-I32-NEXT: pushl %eax
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl (%esp)
+; X86-I32-NEXT: movl (%esp), %eax
+; X86-I32-NEXT: popl %ecx
+; X86-I32-NEXT: .cfi_def_cfa_offset 4
+; X86-I32-NEXT: retl
+;
+; X86-I64-LABEL: lrint_v1f32:
+; X86-I64: # %bb.0:
+; X86-I64-NEXT: pushl %ebp
+; X86-I64-NEXT: .cfi_def_cfa_offset 8
+; X86-I64-NEXT: .cfi_offset %ebp, -8
+; X86-I64-NEXT: movl %esp, %ebp
+; X86-I64-NEXT: .cfi_def_cfa_register %ebp
+; X86-I64-NEXT: andl $-8, %esp
+; X86-I64-NEXT: subl $8, %esp
+; X86-I64-NEXT: flds 8(%ebp)
+; X86-I64-NEXT: fistpll (%esp)
+; X86-I64-NEXT: movl (%esp), %eax
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I64-NEXT: movl %ebp, %esp
+; X86-I64-NEXT: popl %ebp
+; X86-I64-NEXT: .cfi_def_cfa %esp, 4
+; X86-I64-NEXT: retl
+;
; X86-SSE2-LABEL: lrint_v1f32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtss2si {{[0-9]+}}(%esp), %eax
@@ -36,6 +67,53 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>)
define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
+; X86-I32-LABEL: lrint_v2f32:
+; X86-I32: # %bb.0:
+; X86-I32-NEXT: subl $8, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl (%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: movl (%esp), %eax
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I32-NEXT: addl $8, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 4
+; X86-I32-NEXT: retl
+;
+; X86-I64-LABEL: lrint_v2f32:
+; X86-I64: # %bb.0:
+; X86-I64-NEXT: pushl %ebp
+; X86-I64-NEXT: .cfi_def_cfa_offset 8
+; X86-I64-NEXT: .cfi_offset %ebp, -8
+; X86-I64-NEXT: movl %esp, %ebp
+; X86-I64-NEXT: .cfi_def_cfa_register %ebp
+; X86-I64-NEXT: pushl %edi
+; X86-I64-NEXT: pushl %esi
+; X86-I64-NEXT: andl $-8, %esp
+; X86-I64-NEXT: subl $16, %esp
+; X86-I64-NEXT: .cfi_offset %esi, -16
+; X86-I64-NEXT: .cfi_offset %edi, -12
+; X86-I64-NEXT: movl 8(%ebp), %eax
+; X86-I64-NEXT: flds 16(%ebp)
+; X86-I64-NEXT: flds 12(%ebp)
+; X86-I64-NEXT: fistpll (%esp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: movl (%esp), %ecx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I64-NEXT: movl %edi, 12(%eax)
+; X86-I64-NEXT: movl %esi, 8(%eax)
+; X86-I64-NEXT: movl %edx, 4(%eax)
+; X86-I64-NEXT: movl %ecx, (%eax)
+; X86-I64-NEXT: leal -8(%ebp), %esp
+; X86-I64-NEXT: popl %esi
+; X86-I64-NEXT: popl %edi
+; X86-I64-NEXT: popl %ebp
+; X86-I64-NEXT: .cfi_def_cfa %esp, 4
+; X86-I64-NEXT: retl $4
+;
; X86-SSE2-LABEL: lrint_v2f32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtps2dq %xmm0, %xmm0
@@ -81,6 +159,95 @@ define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>)
define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
+; X86-I32-LABEL: lrint_v4f32:
+; X86-I32: # %bb.0:
+; X86-I32-NEXT: pushl %edi
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: pushl %esi
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: subl $16, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 28
+; X86-I32-NEXT: .cfi_offset %esi, -12
+; X86-I32-NEXT: .cfi_offset %edi, -8
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl (%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: movl (%esp), %ecx
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I32-NEXT: movl %edi, 12(%eax)
+; X86-I32-NEXT: movl %esi, 8(%eax)
+; X86-I32-NEXT: movl %edx, 4(%eax)
+; X86-I32-NEXT: movl %ecx, (%eax)
+; X86-I32-NEXT: addl $16, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: popl %esi
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: popl %edi
+; X86-I32-NEXT: .cfi_def_cfa_offset 4
+; X86-I32-NEXT: retl $4
+;
+; X86-I64-LABEL: lrint_v4f32:
+; X86-I64: # %bb.0:
+; X86-I64-NEXT: pushl %ebp
+; X86-I64-NEXT: .cfi_def_cfa_offset 8
+; X86-I64-NEXT: .cfi_offset %ebp, -8
+; X86-I64-NEXT: movl %esp, %ebp
+; X86-I64-NEXT: .cfi_def_cfa_register %ebp
+; X86-I64-NEXT: pushl %ebx
+; X86-I64-NEXT: pushl %edi
+; X86-I64-NEXT: pushl %esi
+; X86-I64-NEXT: andl $-8, %esp
+; X86-I64-NEXT: subl $56, %esp
+; X86-I64-NEXT: .cfi_offset %esi, -20
+; X86-I64-NEXT: .cfi_offset %edi, -16
+; X86-I64-NEXT: .cfi_offset %ebx, -12
+; X86-I64-NEXT: movl 8(%ebp), %eax
+; X86-I64-NEXT: flds 24(%ebp)
+; X86-I64-NEXT: flds 20(%ebp)
+; X86-I64-NEXT: flds 16(%ebp)
+; X86-I64-NEXT: flds 12(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I64-NEXT: movl %esi, 28(%eax)
+; X86-I64-NEXT: movl %ecx, 24(%eax)
+; X86-I64-NEXT: movl %edx, 20(%eax)
+; X86-I64-NEXT: movl %ebx, 16(%eax)
+; X86-I64-NEXT: movl %edi, 12(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 8(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 4(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, (%eax)
+; X86-I64-NEXT: leal -12(%ebp), %esp
+; X86-I64-NEXT: popl %esi
+; X86-I64-NEXT: popl %edi
+; X86-I64-NEXT: popl %ebx
+; X86-I64-NEXT: popl %ebp
+; X86-I64-NEXT: .cfi_def_cfa %esp, 4
+; X86-I64-NEXT: retl $4
+;
; X86-SSE2-LABEL: lrint_v4f32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtps2dq %xmm0, %xmm0
@@ -142,6 +309,165 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>)
define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
+; X86-I32-LABEL: lrint_v8f32:
+; X86-I32: # %bb.0:
+; X86-I32-NEXT: pushl %ebp
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: pushl %ebx
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: pushl %edi
+; X86-I32-NEXT: .cfi_def_cfa_offset 16
+; X86-I32-NEXT: pushl %esi
+; X86-I32-NEXT: .cfi_def_cfa_offset 20
+; X86-I32-NEXT: subl $40, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 60
+; X86-I32-NEXT: .cfi_offset %esi, -20
+; X86-I32-NEXT: .cfi_offset %edi, -16
+; X86-I32-NEXT: .cfi_offset %ebx, -12
+; X86-I32-NEXT: .cfi_offset %ebp, -8
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: flds {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I32-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I32-NEXT: movl %edx, 28(%eax)
+; X86-I32-NEXT: movl %ecx, 24(%eax)
+; X86-I32-NEXT: movl %ebp, 20(%eax)
+; X86-I32-NEXT: movl %ebx, 16(%eax)
+; X86-I32-NEXT: movl %edi, 12(%eax)
+; X86-I32-NEXT: movl %esi, 8(%eax)
+; X86-I32-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-I32-NEXT: movl %ecx, 4(%eax)
+; X86-I32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I32-NEXT: movl %ecx, (%eax)
+; X86-I32-NEXT: addl $40, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 20
+; X86-I32-NEXT: popl %esi
+; X86-I32-NEXT: .cfi_def_cfa_offset 16
+; X86-I32-NEXT: popl %edi
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: popl %ebx
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: popl %ebp
+; X86-I32-NEXT: .cfi_def_cfa_offset 4
+; X86-I32-NEXT: retl $4
+;
+; X86-I64-LABEL: lrint_v8f32:
+; X86-I64: # %bb.0:
+; X86-I64-NEXT: pushl %ebp
+; X86-I64-NEXT: .cfi_def_cfa_offset 8
+; X86-I64-NEXT: .cfi_offset %ebp, -8
+; X86-I64-NEXT: movl %esp, %ebp
+; X86-I64-NEXT: .cfi_def_cfa_register %ebp
+; X86-I64-NEXT: pushl %ebx
+; X86-I64-NEXT: pushl %edi
+; X86-I64-NEXT: pushl %esi
+; X86-I64-NEXT: andl $-8, %esp
+; X86-I64-NEXT: subl $120, %esp
+; X86-I64-NEXT: .cfi_offset %esi, -20
+; X86-I64-NEXT: .cfi_offset %edi, -16
+; X86-I64-NEXT: .cfi_offset %ebx, -12
+; X86-I64-NEXT: flds 12(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: flds 16(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: flds 20(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: flds 24(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: flds 28(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: flds 32(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: flds 36(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: flds 40(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: movl 8(%ebp), %eax
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-I64-NEXT: movl %ebx, 60(%eax)
+; X86-I64-NEXT: movl %ecx, 56(%eax)
+; X86-I64-NEXT: movl %edx, 52(%eax)
+; X86-I64-NEXT: movl %esi, 48(%eax)
+; X86-I64-NEXT: movl %edi, 44(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 40(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 36(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 32(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 28(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 24(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 20(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 16(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 12(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 8(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 4(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, (%eax)
+; X86-I64-NEXT: leal -12(%ebp), %esp
+; X86-I64-NEXT: popl %esi
+; X86-I64-NEXT: popl %edi
+; X86-I64-NEXT: popl %ebx
+; X86-I64-NEXT: popl %ebp
+; X86-I64-NEXT: .cfi_def_cfa %esp, 4
+; X86-I64-NEXT: retl $4
+;
; X86-SSE2-LABEL: lrint_v8f32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtps2dq %xmm0, %xmm0
@@ -242,6 +568,35 @@ define <16 x iXLen> @lrint_v16iXLen_v16f32(<16 x float> %x) {
declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>)
define <1 x iXLen> @lrint_v1f64(<1 x double> %x) {
+; X86-I32-LABEL: lrint_v1f64:
+; X86-I32: # %bb.0:
+; X86-I32-NEXT: pushl %eax
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl (%esp)
+; X86-I32-NEXT: movl (%esp), %eax
+; X86-I32-NEXT: popl %ecx
+; X86-I32-NEXT: .cfi_def_cfa_offset 4
+; X86-I32-NEXT: retl
+;
+; X86-I64-LABEL: lrint_v1f64:
+; X86-I64: # %bb.0:
+; X86-I64-NEXT: pushl %ebp
+; X86-I64-NEXT: .cfi_def_cfa_offset 8
+; X86-I64-NEXT: .cfi_offset %ebp, -8
+; X86-I64-NEXT: movl %esp, %ebp
+; X86-I64-NEXT: .cfi_def_cfa_register %ebp
+; X86-I64-NEXT: andl $-8, %esp
+; X86-I64-NEXT: subl $8, %esp
+; X86-I64-NEXT: fldl 8(%ebp)
+; X86-I64-NEXT: fistpll (%esp)
+; X86-I64-NEXT: movl (%esp), %eax
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I64-NEXT: movl %ebp, %esp
+; X86-I64-NEXT: popl %ebp
+; X86-I64-NEXT: .cfi_def_cfa %esp, 4
+; X86-I64-NEXT: retl
+;
; X86-SSE2-LABEL: lrint_v1f64:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtsd2si {{[0-9]+}}(%esp), %eax
@@ -267,6 +622,53 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) {
declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double>)
define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
+; X86-I32-LABEL: lrint_v2f64:
+; X86-I32: # %bb.0:
+; X86-I32-NEXT: subl $8, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl (%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: movl (%esp), %eax
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I32-NEXT: addl $8, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 4
+; X86-I32-NEXT: retl
+;
+; X86-I64-LABEL: lrint_v2f64:
+; X86-I64: # %bb.0:
+; X86-I64-NEXT: pushl %ebp
+; X86-I64-NEXT: .cfi_def_cfa_offset 8
+; X86-I64-NEXT: .cfi_offset %ebp, -8
+; X86-I64-NEXT: movl %esp, %ebp
+; X86-I64-NEXT: .cfi_def_cfa_register %ebp
+; X86-I64-NEXT: pushl %edi
+; X86-I64-NEXT: pushl %esi
+; X86-I64-NEXT: andl $-8, %esp
+; X86-I64-NEXT: subl $16, %esp
+; X86-I64-NEXT: .cfi_offset %esi, -16
+; X86-I64-NEXT: .cfi_offset %edi, -12
+; X86-I64-NEXT: movl 8(%ebp), %eax
+; X86-I64-NEXT: fldl 20(%ebp)
+; X86-I64-NEXT: fldl 12(%ebp)
+; X86-I64-NEXT: fistpll (%esp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: movl (%esp), %ecx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I64-NEXT: movl %edi, 12(%eax)
+; X86-I64-NEXT: movl %esi, 8(%eax)
+; X86-I64-NEXT: movl %edx, 4(%eax)
+; X86-I64-NEXT: movl %ecx, (%eax)
+; X86-I64-NEXT: leal -8(%ebp), %esp
+; X86-I64-NEXT: popl %esi
+; X86-I64-NEXT: popl %edi
+; X86-I64-NEXT: popl %ebp
+; X86-I64-NEXT: .cfi_def_cfa %esp, 4
+; X86-I64-NEXT: retl $4
+;
; X86-SSE2-LABEL: lrint_v2f64:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtpd2dq %xmm0, %xmm0
@@ -312,6 +714,95 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double>)
define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
+; X86-I32-LABEL: lrint_v4f64:
+; X86-I32: # %bb.0:
+; X86-I32-NEXT: pushl %edi
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: pushl %esi
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: subl $16, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 28
+; X86-I32-NEXT: .cfi_offset %esi, -12
+; X86-I32-NEXT: .cfi_offset %edi, -8
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl (%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: movl (%esp), %ecx
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I32-NEXT: movl %edi, 12(%eax)
+; X86-I32-NEXT: movl %esi, 8(%eax)
+; X86-I32-NEXT: movl %edx, 4(%eax)
+; X86-I32-NEXT: movl %ecx, (%eax)
+; X86-I32-NEXT: addl $16, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: popl %esi
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: popl %edi
+; X86-I32-NEXT: .cfi_def_cfa_offset 4
+; X86-I32-NEXT: retl $4
+;
+; X86-I64-LABEL: lrint_v4f64:
+; X86-I64: # %bb.0:
+; X86-I64-NEXT: pushl %ebp
+; X86-I64-NEXT: .cfi_def_cfa_offset 8
+; X86-I64-NEXT: .cfi_offset %ebp, -8
+; X86-I64-NEXT: movl %esp, %ebp
+; X86-I64-NEXT: .cfi_def_cfa_register %ebp
+; X86-I64-NEXT: pushl %ebx
+; X86-I64-NEXT: pushl %edi
+; X86-I64-NEXT: pushl %esi
+; X86-I64-NEXT: andl $-8, %esp
+; X86-I64-NEXT: subl $56, %esp
+; X86-I64-NEXT: .cfi_offset %esi, -20
+; X86-I64-NEXT: .cfi_offset %edi, -16
+; X86-I64-NEXT: .cfi_offset %ebx, -12
+; X86-I64-NEXT: movl 8(%ebp), %eax
+; X86-I64-NEXT: fldl 36(%ebp)
+; X86-I64-NEXT: fldl 28(%ebp)
+; X86-I64-NEXT: fldl 20(%ebp)
+; X86-I64-NEXT: fldl 12(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I64-NEXT: movl %esi, 28(%eax)
+; X86-I64-NEXT: movl %ecx, 24(%eax)
+; X86-I64-NEXT: movl %edx, 20(%eax)
+; X86-I64-NEXT: movl %ebx, 16(%eax)
+; X86-I64-NEXT: movl %edi, 12(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 8(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 4(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, (%eax)
+; X86-I64-NEXT: leal -12(%ebp), %esp
+; X86-I64-NEXT: popl %esi
+; X86-I64-NEXT: popl %edi
+; X86-I64-NEXT: popl %ebx
+; X86-I64-NEXT: popl %ebp
+; X86-I64-NEXT: .cfi_def_cfa %esp, 4
+; X86-I64-NEXT: retl $4
+;
; X86-SSE2-LABEL: lrint_v4f64:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtpd2dq %xmm1, %xmm1
@@ -377,6 +868,165 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double>)
define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
+; X86-I32-LABEL: lrint_v8f64:
+; X86-I32: # %bb.0:
+; X86-I32-NEXT: pushl %ebp
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: pushl %ebx
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: pushl %edi
+; X86-I32-NEXT: .cfi_def_cfa_offset 16
+; X86-I32-NEXT: pushl %esi
+; X86-I32-NEXT: .cfi_def_cfa_offset 20
+; X86-I32-NEXT: subl $40, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 60
+; X86-I32-NEXT: .cfi_offset %esi, -20
+; X86-I32-NEXT: .cfi_offset %edi, -16
+; X86-I32-NEXT: .cfi_offset %ebx, -12
+; X86-I32-NEXT: .cfi_offset %ebp, -8
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: fistpl {{[0-9]+}}(%esp)
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I32-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I32-NEXT: movl %edx, 28(%eax)
+; X86-I32-NEXT: movl %ecx, 24(%eax)
+; X86-I32-NEXT: movl %ebp, 20(%eax)
+; X86-I32-NEXT: movl %ebx, 16(%eax)
+; X86-I32-NEXT: movl %edi, 12(%eax)
+; X86-I32-NEXT: movl %esi, 8(%eax)
+; X86-I32-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-I32-NEXT: movl %ecx, 4(%eax)
+; X86-I32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I32-NEXT: movl %ecx, (%eax)
+; X86-I32-NEXT: addl $40, %esp
+; X86-I32-NEXT: .cfi_def_cfa_offset 20
+; X86-I32-NEXT: popl %esi
+; X86-I32-NEXT: .cfi_def_cfa_offset 16
+; X86-I32-NEXT: popl %edi
+; X86-I32-NEXT: .cfi_def_cfa_offset 12
+; X86-I32-NEXT: popl %ebx
+; X86-I32-NEXT: .cfi_def_cfa_offset 8
+; X86-I32-NEXT: popl %ebp
+; X86-I32-NEXT: .cfi_def_cfa_offset 4
+; X86-I32-NEXT: retl $4
+;
+; X86-I64-LABEL: lrint_v8f64:
+; X86-I64: # %bb.0:
+; X86-I64-NEXT: pushl %ebp
+; X86-I64-NEXT: .cfi_def_cfa_offset 8
+; X86-I64-NEXT: .cfi_offset %ebp, -8
+; X86-I64-NEXT: movl %esp, %ebp
+; X86-I64-NEXT: .cfi_def_cfa_register %ebp
+; X86-I64-NEXT: pushl %ebx
+; X86-I64-NEXT: pushl %edi
+; X86-I64-NEXT: pushl %esi
+; X86-I64-NEXT: andl $-8, %esp
+; X86-I64-NEXT: subl $120, %esp
+; X86-I64-NEXT: .cfi_offset %esi, -20
+; X86-I64-NEXT: .cfi_offset %edi, -16
+; X86-I64-NEXT: .cfi_offset %ebx, -12
+; X86-I64-NEXT: fldl 12(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fldl 20(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fldl 28(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fldl 36(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fldl 44(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fldl 52(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fldl 60(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: fldl 68(%ebp)
+; X86-I64-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-I64-NEXT: movl 8(%ebp), %eax
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-I64-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-I64-NEXT: movl %ebx, 60(%eax)
+; X86-I64-NEXT: movl %ecx, 56(%eax)
+; X86-I64-NEXT: movl %edx, 52(%eax)
+; X86-I64-NEXT: movl %esi, 48(%eax)
+; X86-I64-NEXT: movl %edi, 44(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 40(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 36(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 32(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 28(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 24(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 20(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 16(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 12(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 8(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, 4(%eax)
+; X86-I64-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-I64-NEXT: movl %ecx, (%eax)
+; X86-I64-NEXT: leal -12(%ebp), %esp
+; X86-I64-NEXT: popl %esi
+; X86-I64-NEXT: popl %edi
+; X86-I64-NEXT: popl %ebx
+; X86-I64-NEXT: popl %ebp
+; X86-I64-NEXT: .cfi_def_cfa %esp, 4
+; X86-I64-NEXT: retl $4
+;
; X86-SSE2-LABEL: lrint_v8f64:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
More information about the llvm-commits
mailing list