[llvm] [Test] Add and update tests for lrint (PR #152662)
Trevor Gross via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 8 01:20:28 PDT 2025
https://github.com/tgross35 created https://github.com/llvm/llvm-project/pull/152662
A number of backends are missing either all tests for lrint, or specifically those for f16, where lrint currently crashes for `softPromoteHalf` targets. For a number of popular backends, do the following (the shared test pattern is sketched after this list):
* Ensure f16, f32, f64, and f128 are all covered
* Ensure both a 32- and 64-bit target are tested, if relevant
* Add `nounwind` to clean up CFI output
* Add a test covering the above if one did not exist
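
The new per-target files added here (LoongArch, MSP430, RISC-V, SPARC, WebAssembly) all follow the same parameterized layout; a minimal sketch is below. The triple and check prefixes are placeholders and differ per target; the actual files carry autogenerated FileCheck assertions, so see the diff for the exact contents.

; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=<target triple> | FileCheck %s --check-prefixes=CHECK-I32
; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=<target triple> | FileCheck %s --check-prefixes=CHECK-I64

; Each test is written once against the placeholder integer type ITy and
; stamped out for both i32 and i64 results by the sed invocations above.
define ITy @test_lrint_ixx_f32(float %x) #0 {
  %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
  ret ITy %res
}

; `nounwind` keeps CFI directives out of the checked assembly.
attributes #0 = { nounwind }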
From d7a8ac2a2ebe9f43bc61822bb4db659001b67d08 Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Fri, 8 Aug 2025 03:15:39 -0500
Subject: [PATCH] [Test] Add and update tests for lrint
A number of backends are missing either all tests for lrint, or
specifically those for f16, where lrint currently crashes for
`softPromoteHalf` targets. For a number of popular backends, do the
following:
* Ensure f16, f32, f64, and f128 are all covered
* Ensure both a 32- and 64-bit target are tested, if relevant
* Add `nounwind` to clean up CFI output
* Add a test covering the above if one did not exist
---
llvm/test/CodeGen/ARM/llrint-conv.ll | 20 ++
llvm/test/CodeGen/ARM/lrint-conv.ll | 18 ++
llvm/test/CodeGen/AVR/llrint.ll | 18 ++
llvm/test/CodeGen/AVR/lrint.ll | 20 ++
llvm/test/CodeGen/LoongArch/lrint-conv.ll | 269 ++++++++++++++++++
llvm/test/CodeGen/MSP430/lrint-conv.ll | 143 ++++++++++
llvm/test/CodeGen/Mips/llrint-conv.ll | 15 +
llvm/test/CodeGen/Mips/lrint-conv.ll | 15 +
llvm/test/CodeGen/PowerPC/llrint-conv.ll | 32 +++
llvm/test/CodeGen/PowerPC/lrint-conv.ll | 32 +++
llvm/test/CodeGen/RISCV/lrint-conv.ll | 297 ++++++++++++++++++++
llvm/test/CodeGen/SPARC/lrint-conv.ll | 194 +++++++++++++
llvm/test/CodeGen/WebAssembly/lrint-conv.ll | 164 +++++++++++
llvm/test/CodeGen/X86/llrint-conv.ll | 129 +++++++--
llvm/test/CodeGen/X86/lrint-conv-i32.ll | 75 ++++-
llvm/test/CodeGen/X86/lrint-conv-i64.ll | 35 ++-
16 files changed, 1432 insertions(+), 44 deletions(-)
create mode 100644 llvm/test/CodeGen/LoongArch/lrint-conv.ll
create mode 100644 llvm/test/CodeGen/MSP430/lrint-conv.ll
create mode 100644 llvm/test/CodeGen/RISCV/lrint-conv.ll
create mode 100644 llvm/test/CodeGen/SPARC/lrint-conv.ll
create mode 100644 llvm/test/CodeGen/WebAssembly/lrint-conv.ll
diff --git a/llvm/test/CodeGen/ARM/llrint-conv.ll b/llvm/test/CodeGen/ARM/llrint-conv.ll
index 017955bb43afb..680086477468b 100644
--- a/llvm/test/CodeGen/ARM/llrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/llrint-conv.ll
@@ -1,6 +1,16 @@
; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; SOFTFP-LABEL: testmsxh_builtin:
+; SOFTFP: bl llrintf
+; HARDFP-LABEL: testmsxh_builtin:
+; HARDFP: bl llrintf
+define i64 @testmsxh_builtin(half %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.f16(half %x)
+ ret i64 %0
+}
+
; SOFTFP-LABEL: testmsxs_builtin:
; SOFTFP: bl llrintf
; HARDFP-LABEL: testmsxs_builtin:
@@ -21,5 +31,15 @@ entry:
ret i64 %0
}
+; SOFTFP-LABEL: testmsxq_builtin:
+; SOFTFP: bl llrintl
+; HARDFP-LABEL: testmsxq_builtin:
+; HARDFP: bl llrintl
+define i64 @testmsxq_builtin(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.f32(float) nounwind readnone
declare i64 @llvm.llrint.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/ARM/lrint-conv.ll b/llvm/test/CodeGen/ARM/lrint-conv.ll
index 192da565c12fd..9aa95112af533 100644
--- a/llvm/test/CodeGen/ARM/lrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/lrint-conv.ll
@@ -1,6 +1,13 @@
; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; FIXME: crash
+; define i32 @testmswh_builtin(half %x) {
+; entry:
+; %0 = tail call i32 @llvm.lrint.i32.f16(half %x)
+; ret i32 %0
+; }
+
; SOFTFP-LABEL: testmsws_builtin:
; SOFTFP: bl lrintf
; HARDFP-LABEL: testmsws_builtin:
@@ -21,5 +28,16 @@ entry:
ret i32 %0
}
+; FIXME(#44744): incorrect libcall
+; SOFTFP-LABEL: testmswq_builtin:
+; SOFTFP: bl lrintl
+; HARDFP-LABEL: testmswq_builtin:
+; HARDFP: bl lrintl
+define i32 @testmswq_builtin(fp128 %x) {
+entry:
+ %0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
+ ret i32 %0
+}
+
declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
declare i32 @llvm.lrint.i32.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/AVR/llrint.ll b/llvm/test/CodeGen/AVR/llrint.ll
index 32b4c7ab12a4b..0aefb92758260 100644
--- a/llvm/test/CodeGen/AVR/llrint.ll
+++ b/llvm/test/CodeGen/AVR/llrint.ll
@@ -1,6 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=avr -mcpu=atmega328p | FileCheck %s
+; FIXME: crash
+; define i64 @testmsxh_builtin(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; ret i64 %0
+; }
+
define i64 @testmsxs_builtin(float %x) {
; CHECK-LABEL: testmsxs_builtin:
; CHECK: ; %bb.0: ; %entry
@@ -21,5 +28,16 @@ entry:
ret i64 %0
}
+; FIXME(#44744): incorrect libcall
+define i64 @testmsxq_builtin(fp128 %x) {
+; CHECK-LABEL: testmsxq_builtin:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: call llrintl
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.f32(float) nounwind readnone
declare i64 @llvm.llrint.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/AVR/lrint.ll b/llvm/test/CodeGen/AVR/lrint.ll
index d7568305f7b51..135535b82f814 100644
--- a/llvm/test/CodeGen/AVR/lrint.ll
+++ b/llvm/test/CodeGen/AVR/lrint.ll
@@ -1,6 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=avr -mcpu=atmega328p | FileCheck %s
+define i32 @testmswh_builtin(half %x) {
+; CHECK-LABEL: testmswh_builtin:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: call lrintf
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i32 @llvm.lrint.i32.f16(half %x)
+ ret i32 %0
+}
+
define i32 @testmsws_builtin(float %x) {
; CHECK-LABEL: testmsws_builtin:
; CHECK: ; %bb.0: ; %entry
@@ -21,5 +31,15 @@ entry:
ret i32 %0
}
+define i32 @testmswq_builtin(fp128 %x) {
+; CHECK-LABEL: testmswq_builtin:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: call lrint
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
+ ret i32 %0
+}
+
declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
declare i32 @llvm.lrint.i32.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/LoongArch/lrint-conv.ll b/llvm/test/CodeGen/LoongArch/lrint-conv.ll
new file mode 100644
index 0000000000000..4308fd18593d3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lrint-conv.ll
@@ -0,0 +1,269 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=loongarch32 -mattr=+d | FileCheck %s --check-prefixes=LA32-I32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=loongarch32 -mattr=+d | FileCheck %s --check-prefixes=LA32-I64
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=loongarch64 -mattr=+d | FileCheck %s --check-prefixes=LA64-I32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=loongarch64 -mattr=+d | FileCheck %s --check-prefixes=LA64-I64
+
+; FIXME: crash
+; define ITy @test_lrint_ixx_f16(half %x) #0 {
+; %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+; define ITy @test_llrint_ixx_f16(half %x) #0 {
+; %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+define ITy @test_lrint_ixx_f32(float %x) #0 {
+; LA32-I32-LABEL: test_lrint_ixx_f32:
+; LA32-I32: # %bb.0:
+; LA32-I32-NEXT: b lrintf
+;
+; LA32-I64-LABEL: test_lrint_ixx_f32:
+; LA32-I64: # %bb.0:
+; LA32-I64-NEXT: addi.w $sp, $sp, -16
+; LA32-I64-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-I64-NEXT: bl lrintf
+; LA32-I64-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-I64-NEXT: addi.w $sp, $sp, 16
+; LA32-I64-NEXT: ret
+;
+; LA64-I32-LABEL: test_lrint_ixx_f32:
+; LA64-I32: # %bb.0:
+; LA64-I32-NEXT: addi.d $sp, $sp, -16
+; LA64-I32-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-I32-NEXT: pcaddu18i $ra, %call36(lrintf)
+; LA64-I32-NEXT: jirl $ra, $ra, 0
+; LA64-I32-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-I32-NEXT: addi.d $sp, $sp, 16
+; LA64-I32-NEXT: ret
+;
+; LA64-I64-LABEL: test_lrint_ixx_f32:
+; LA64-I64: # %bb.0:
+; LA64-I64-NEXT: pcaddu18i $t8, %call36(lrintf)
+; LA64-I64-NEXT: jr $t8
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) #0 {
+; LA32-I32-LABEL: test_llrint_ixx_f32:
+; LA32-I32: # %bb.0:
+; LA32-I32-NEXT: b llrintf
+;
+; LA32-I64-LABEL: test_llrint_ixx_f32:
+; LA32-I64: # %bb.0:
+; LA32-I64-NEXT: addi.w $sp, $sp, -16
+; LA32-I64-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-I64-NEXT: bl llrintf
+; LA32-I64-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-I64-NEXT: addi.w $sp, $sp, 16
+; LA32-I64-NEXT: ret
+;
+; LA64-I32-LABEL: test_llrint_ixx_f32:
+; LA64-I32: # %bb.0:
+; LA64-I32-NEXT: addi.d $sp, $sp, -16
+; LA64-I32-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-I32-NEXT: pcaddu18i $ra, %call36(llrintf)
+; LA64-I32-NEXT: jirl $ra, $ra, 0
+; LA64-I32-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-I32-NEXT: addi.d $sp, $sp, 16
+; LA64-I32-NEXT: ret
+;
+; LA64-I64-LABEL: test_llrint_ixx_f32:
+; LA64-I64: # %bb.0:
+; LA64-I64-NEXT: pcaddu18i $t8, %call36(llrintf)
+; LA64-I64-NEXT: jr $t8
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) #0 {
+; LA32-I32-LABEL: test_lrint_ixx_f64:
+; LA32-I32: # %bb.0:
+; LA32-I32-NEXT: b lrint
+;
+; LA32-I64-LABEL: test_lrint_ixx_f64:
+; LA32-I64: # %bb.0:
+; LA32-I64-NEXT: addi.w $sp, $sp, -16
+; LA32-I64-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-I64-NEXT: bl lrint
+; LA32-I64-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-I64-NEXT: addi.w $sp, $sp, 16
+; LA32-I64-NEXT: ret
+;
+; LA64-I32-LABEL: test_lrint_ixx_f64:
+; LA64-I32: # %bb.0:
+; LA64-I32-NEXT: addi.d $sp, $sp, -16
+; LA64-I32-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-I32-NEXT: pcaddu18i $ra, %call36(lrint)
+; LA64-I32-NEXT: jirl $ra, $ra, 0
+; LA64-I32-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-I32-NEXT: addi.d $sp, $sp, 16
+; LA64-I32-NEXT: ret
+;
+; LA64-I64-LABEL: test_lrint_ixx_f64:
+; LA64-I64: # %bb.0:
+; LA64-I64-NEXT: pcaddu18i $t8, %call36(lrint)
+; LA64-I64-NEXT: jr $t8
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) #0 {
+; LA32-I32-LABEL: test_llrint_ixx_f64:
+; LA32-I32: # %bb.0:
+; LA32-I32-NEXT: b llrint
+;
+; LA32-I64-LABEL: test_llrint_ixx_f64:
+; LA32-I64: # %bb.0:
+; LA32-I64-NEXT: addi.w $sp, $sp, -16
+; LA32-I64-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-I64-NEXT: bl llrint
+; LA32-I64-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-I64-NEXT: addi.w $sp, $sp, 16
+; LA32-I64-NEXT: ret
+;
+; LA64-I32-LABEL: test_llrint_ixx_f64:
+; LA64-I32: # %bb.0:
+; LA64-I32-NEXT: addi.d $sp, $sp, -16
+; LA64-I32-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-I32-NEXT: pcaddu18i $ra, %call36(llrint)
+; LA64-I32-NEXT: jirl $ra, $ra, 0
+; LA64-I32-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-I32-NEXT: addi.d $sp, $sp, 16
+; LA64-I32-NEXT: ret
+;
+; LA64-I64-LABEL: test_llrint_ixx_f64:
+; LA64-I64: # %bb.0:
+; LA64-I64-NEXT: pcaddu18i $t8, %call36(llrint)
+; LA64-I64-NEXT: jr $t8
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f128(fp128 %x) #0 {
+; LA32-I32-LABEL: test_lrint_ixx_f128:
+; LA32-I32: # %bb.0:
+; LA32-I32-NEXT: addi.w $sp, $sp, -32
+; LA32-I32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-I32-NEXT: ld.w $a1, $a0, 0
+; LA32-I32-NEXT: ld.w $a2, $a0, 4
+; LA32-I32-NEXT: ld.w $a3, $a0, 8
+; LA32-I32-NEXT: ld.w $a0, $a0, 12
+; LA32-I32-NEXT: st.w $a0, $sp, 20
+; LA32-I32-NEXT: st.w $a3, $sp, 16
+; LA32-I32-NEXT: st.w $a2, $sp, 12
+; LA32-I32-NEXT: addi.w $a0, $sp, 8
+; LA32-I32-NEXT: st.w $a1, $sp, 8
+; LA32-I32-NEXT: bl lrintl
+; LA32-I32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-I32-NEXT: addi.w $sp, $sp, 32
+; LA32-I32-NEXT: ret
+;
+; LA32-I64-LABEL: test_lrint_ixx_f128:
+; LA32-I64: # %bb.0:
+; LA32-I64-NEXT: addi.w $sp, $sp, -32
+; LA32-I64-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-I64-NEXT: ld.w $a1, $a0, 0
+; LA32-I64-NEXT: ld.w $a2, $a0, 4
+; LA32-I64-NEXT: ld.w $a3, $a0, 8
+; LA32-I64-NEXT: ld.w $a0, $a0, 12
+; LA32-I64-NEXT: st.w $a0, $sp, 12
+; LA32-I64-NEXT: st.w $a3, $sp, 8
+; LA32-I64-NEXT: st.w $a2, $sp, 4
+; LA32-I64-NEXT: addi.w $a0, $sp, 0
+; LA32-I64-NEXT: st.w $a1, $sp, 0
+; LA32-I64-NEXT: bl lrintl
+; LA32-I64-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-I64-NEXT: addi.w $sp, $sp, 32
+; LA32-I64-NEXT: ret
+;
+; LA64-I32-LABEL: test_lrint_ixx_f128:
+; LA64-I32: # %bb.0:
+; LA64-I32-NEXT: addi.d $sp, $sp, -16
+; LA64-I32-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-I32-NEXT: pcaddu18i $ra, %call36(lrintl)
+; LA64-I32-NEXT: jirl $ra, $ra, 0
+; LA64-I32-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-I32-NEXT: addi.d $sp, $sp, 16
+; LA64-I32-NEXT: ret
+;
+; LA64-I64-LABEL: test_lrint_ixx_f128:
+; LA64-I64: # %bb.0:
+; LA64-I64-NEXT: addi.d $sp, $sp, -16
+; LA64-I64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-I64-NEXT: pcaddu18i $ra, %call36(lrintl)
+; LA64-I64-NEXT: jirl $ra, $ra, 0
+; LA64-I64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-I64-NEXT: addi.d $sp, $sp, 16
+; LA64-I64-NEXT: ret
+ %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f128(fp128 %x) #0 {
+; LA32-I32-LABEL: test_llrint_ixx_f128:
+; LA32-I32: # %bb.0:
+; LA32-I32-NEXT: addi.w $sp, $sp, -32
+; LA32-I32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-I32-NEXT: ld.w $a1, $a0, 0
+; LA32-I32-NEXT: ld.w $a2, $a0, 4
+; LA32-I32-NEXT: ld.w $a3, $a0, 8
+; LA32-I32-NEXT: ld.w $a0, $a0, 12
+; LA32-I32-NEXT: st.w $a0, $sp, 20
+; LA32-I32-NEXT: st.w $a3, $sp, 16
+; LA32-I32-NEXT: st.w $a2, $sp, 12
+; LA32-I32-NEXT: addi.w $a0, $sp, 8
+; LA32-I32-NEXT: st.w $a1, $sp, 8
+; LA32-I32-NEXT: bl llrintl
+; LA32-I32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-I32-NEXT: addi.w $sp, $sp, 32
+; LA32-I32-NEXT: ret
+;
+; LA32-I64-LABEL: test_llrint_ixx_f128:
+; LA32-I64: # %bb.0:
+; LA32-I64-NEXT: addi.w $sp, $sp, -32
+; LA32-I64-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-I64-NEXT: ld.w $a1, $a0, 0
+; LA32-I64-NEXT: ld.w $a2, $a0, 4
+; LA32-I64-NEXT: ld.w $a3, $a0, 8
+; LA32-I64-NEXT: ld.w $a0, $a0, 12
+; LA32-I64-NEXT: st.w $a0, $sp, 12
+; LA32-I64-NEXT: st.w $a3, $sp, 8
+; LA32-I64-NEXT: st.w $a2, $sp, 4
+; LA32-I64-NEXT: addi.w $a0, $sp, 0
+; LA32-I64-NEXT: st.w $a1, $sp, 0
+; LA32-I64-NEXT: bl llrintl
+; LA32-I64-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-I64-NEXT: addi.w $sp, $sp, 32
+; LA32-I64-NEXT: ret
+;
+; LA64-I32-LABEL: test_llrint_ixx_f128:
+; LA64-I32: # %bb.0:
+; LA64-I32-NEXT: addi.d $sp, $sp, -16
+; LA64-I32-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-I32-NEXT: pcaddu18i $ra, %call36(llrintl)
+; LA64-I32-NEXT: jirl $ra, $ra, 0
+; LA64-I32-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-I32-NEXT: addi.d $sp, $sp, 16
+; LA64-I32-NEXT: ret
+;
+; LA64-I64-LABEL: test_llrint_ixx_f128:
+; LA64-I64: # %bb.0:
+; LA64-I64-NEXT: addi.d $sp, $sp, -16
+; LA64-I64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-I64-NEXT: pcaddu18i $ra, %call36(llrintl)
+; LA64-I64-NEXT: jirl $ra, $ra, 0
+; LA64-I64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-I64-NEXT: addi.d $sp, $sp, 16
+; LA64-I64-NEXT: ret
+ %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/MSP430/lrint-conv.ll b/llvm/test/CodeGen/MSP430/lrint-conv.ll
new file mode 100644
index 0000000000000..c99985bbd7f7c
--- /dev/null
+++ b/llvm/test/CodeGen/MSP430/lrint-conv.ll
@@ -0,0 +1,143 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=msp430-unknown-unknown | FileCheck %s --check-prefixes=CHECK-I32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=msp430-unknown-unknown | FileCheck %s --check-prefixes=CHECK-I64
+
+; FIXME: crash
+; define ITy @test_lrint_ixx_f16(half %x) #0 {
+; %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+; define ITy @test_llrint_ixx_f16(half %x) #0 {
+; %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+define ITy @test_lrint_ixx_f32(float %x) #0 {
+; CHECK-I32-LABEL: test_lrint_ixx_f32:
+; CHECK-I32: ; %bb.0:
+; CHECK-I32-NEXT: call #lrintf
+; CHECK-I32-NEXT: ret
+;
+; CHECK-I64-LABEL: test_lrint_ixx_f32:
+; CHECK-I64: ; %bb.0:
+; CHECK-I64-NEXT: call #lrintf
+; CHECK-I64-NEXT: ret
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) #0 {
+; CHECK-I32-LABEL: test_llrint_ixx_f32:
+; CHECK-I32: ; %bb.0:
+; CHECK-I32-NEXT: call #llrintf
+; CHECK-I32-NEXT: ret
+;
+; CHECK-I64-LABEL: test_llrint_ixx_f32:
+; CHECK-I64: ; %bb.0:
+; CHECK-I64-NEXT: call #llrintf
+; CHECK-I64-NEXT: ret
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) #0 {
+; CHECK-I32-LABEL: test_lrint_ixx_f64:
+; CHECK-I32: ; %bb.0:
+; CHECK-I32-NEXT: call #lrint
+; CHECK-I32-NEXT: ret
+;
+; CHECK-I64-LABEL: test_lrint_ixx_f64:
+; CHECK-I64: ; %bb.0:
+; CHECK-I64-NEXT: call #lrint
+; CHECK-I64-NEXT: ret
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) #0 {
+; CHECK-I32-LABEL: test_llrint_ixx_f64:
+; CHECK-I32: ; %bb.0:
+; CHECK-I32-NEXT: call #llrint
+; CHECK-I32-NEXT: ret
+;
+; CHECK-I64-LABEL: test_llrint_ixx_f64:
+; CHECK-I64: ; %bb.0:
+; CHECK-I64-NEXT: call #llrint
+; CHECK-I64-NEXT: ret
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f128(fp128 %x) #0 {
+; CHECK-I32-LABEL: test_lrint_ixx_f128:
+; CHECK-I32: ; %bb.0:
+; CHECK-I32-NEXT: sub #16, r1
+; CHECK-I32-NEXT: mov 32(r1), 14(r1)
+; CHECK-I32-NEXT: mov 30(r1), 12(r1)
+; CHECK-I32-NEXT: mov 28(r1), 10(r1)
+; CHECK-I32-NEXT: mov 26(r1), 8(r1)
+; CHECK-I32-NEXT: mov 24(r1), 6(r1)
+; CHECK-I32-NEXT: mov 22(r1), 4(r1)
+; CHECK-I32-NEXT: mov 20(r1), 2(r1)
+; CHECK-I32-NEXT: mov 18(r1), 0(r1)
+; CHECK-I32-NEXT: call #lrintl
+; CHECK-I32-NEXT: add #16, r1
+; CHECK-I32-NEXT: ret
+;
+; CHECK-I64-LABEL: test_lrint_ixx_f128:
+; CHECK-I64: ; %bb.0:
+; CHECK-I64-NEXT: sub #16, r1
+; CHECK-I64-NEXT: mov 32(r1), 14(r1)
+; CHECK-I64-NEXT: mov 30(r1), 12(r1)
+; CHECK-I64-NEXT: mov 28(r1), 10(r1)
+; CHECK-I64-NEXT: mov 26(r1), 8(r1)
+; CHECK-I64-NEXT: mov 24(r1), 6(r1)
+; CHECK-I64-NEXT: mov 22(r1), 4(r1)
+; CHECK-I64-NEXT: mov 20(r1), 2(r1)
+; CHECK-I64-NEXT: mov 18(r1), 0(r1)
+; CHECK-I64-NEXT: call #lrintl
+; CHECK-I64-NEXT: add #16, r1
+; CHECK-I64-NEXT: ret
+ %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f128(fp128 %x) #0 {
+; CHECK-I32-LABEL: test_llrint_ixx_f128:
+; CHECK-I32: ; %bb.0:
+; CHECK-I32-NEXT: sub #16, r1
+; CHECK-I32-NEXT: mov 32(r1), 14(r1)
+; CHECK-I32-NEXT: mov 30(r1), 12(r1)
+; CHECK-I32-NEXT: mov 28(r1), 10(r1)
+; CHECK-I32-NEXT: mov 26(r1), 8(r1)
+; CHECK-I32-NEXT: mov 24(r1), 6(r1)
+; CHECK-I32-NEXT: mov 22(r1), 4(r1)
+; CHECK-I32-NEXT: mov 20(r1), 2(r1)
+; CHECK-I32-NEXT: mov 18(r1), 0(r1)
+; CHECK-I32-NEXT: call #llrintl
+; CHECK-I32-NEXT: add #16, r1
+; CHECK-I32-NEXT: ret
+;
+; CHECK-I64-LABEL: test_llrint_ixx_f128:
+; CHECK-I64: ; %bb.0:
+; CHECK-I64-NEXT: sub #16, r1
+; CHECK-I64-NEXT: mov 32(r1), 14(r1)
+; CHECK-I64-NEXT: mov 30(r1), 12(r1)
+; CHECK-I64-NEXT: mov 28(r1), 10(r1)
+; CHECK-I64-NEXT: mov 26(r1), 8(r1)
+; CHECK-I64-NEXT: mov 24(r1), 6(r1)
+; CHECK-I64-NEXT: mov 22(r1), 4(r1)
+; CHECK-I64-NEXT: mov 20(r1), 2(r1)
+; CHECK-I64-NEXT: mov 18(r1), 0(r1)
+; CHECK-I64-NEXT: call #llrintl
+; CHECK-I64-NEXT: add #16, r1
+; CHECK-I64-NEXT: ret
+ %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/Mips/llrint-conv.ll b/llvm/test/CodeGen/Mips/llrint-conv.ll
index dcb4e5657e80b..ee3c0d99253a6 100644
--- a/llvm/test/CodeGen/Mips/llrint-conv.ll
+++ b/llvm/test/CodeGen/Mips/llrint-conv.ll
@@ -1,4 +1,19 @@
; RUN: llc < %s -mtriple=mips64el -mattr=+soft-float | FileCheck %s
+; RUN: llc < %s -mtriple=mips -mattr=+soft-float | FileCheck %s
+
+; FIXME: crash
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; ret i64 %0
+; }
define signext i32 @testmsws(float %x) {
; CHECK-LABEL: testmsws:
diff --git a/llvm/test/CodeGen/Mips/lrint-conv.ll b/llvm/test/CodeGen/Mips/lrint-conv.ll
index bd3f7b3babe10..6d2e392675f1c 100644
--- a/llvm/test/CodeGen/Mips/lrint-conv.ll
+++ b/llvm/test/CodeGen/Mips/lrint-conv.ll
@@ -1,4 +1,19 @@
; RUN: llc < %s -mtriple=mips64el -mattr=+soft-float | FileCheck %s
+; RUN: llc < %s -mtriple=mips -mattr=+soft-float | FileCheck %s
+
+; FIXME: crash
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; ret i64 %0
+; }
define signext i32 @testmsws(float %x) {
; CHECK-LABEL: testmsws:
diff --git a/llvm/test/CodeGen/PowerPC/llrint-conv.ll b/llvm/test/CodeGen/PowerPC/llrint-conv.ll
index daadf85b4085a..ff41a53464d8c 100644
--- a/llvm/test/CodeGen/PowerPC/llrint-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/llrint-conv.ll
@@ -1,4 +1,19 @@
; RUN: llc < %s -mtriple=powerpc64le | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc | FileCheck %s
+
+; FIXME: crash
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.llrint.f16(half %x)
+; ret i64 %0
+; }
; CHECK-LABEL: testmsws:
; CHECK: bl llrintf
@@ -51,6 +66,23 @@ entry:
ret i64 %0
}
+; CHECK-LABEL: testmswq:
+; CHECK: bl llrintf128
+define signext i32 @testmswq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %x)
+ %conv = trunc i64 %0 to i32
+ ret i32 %conv
+}
+
+; CHECK-LABEL: testmslq:
+; CHECK: bl llrintf128
+define i64 @testmslq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.f32(float) nounwind readnone
declare i64 @llvm.llrint.f64(double) nounwind readnone
declare i64 @llvm.llrint.ppcf128(ppc_fp128) nounwind readnone
diff --git a/llvm/test/CodeGen/PowerPC/lrint-conv.ll b/llvm/test/CodeGen/PowerPC/lrint-conv.ll
index adfc994497323..7b1a9d6a9fc77 100644
--- a/llvm/test/CodeGen/PowerPC/lrint-conv.ll
+++ b/llvm/test/CodeGen/PowerPC/lrint-conv.ll
@@ -1,4 +1,19 @@
; RUN: llc < %s -mtriple=powerpc64le | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc | FileCheck %s
+
+; FIXME: crash
+; define signext i32 @testmswh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; %conv = trunc i64 %0 to i32
+; ret i32 %conv
+; }
+
+; define i64 @testmsxh(half %x) {
+; entry:
+; %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+; ret i64 %0
+; }
; CHECK-LABEL: testmsws:
; CHECK: bl lrintf
@@ -51,6 +66,23 @@ entry:
ret i64 %0
}
+; CHECK-LABEL: testmswq:
+; CHECK: bl lrintf128
+define signext i32 @testmswq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
+ %conv = trunc i64 %0 to i32
+ ret i32 %conv
+}
+
+; CHECK-LABEL: testmslq:
+; CHECK: bl lrintf128
+define i64 @testmslq(fp128 %x) {
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.lrint.i64.f32(float) nounwind readnone
declare i64 @llvm.lrint.i64.f64(double) nounwind readnone
declare i64 @llvm.lrint.i64.ppcf128(ppc_fp128) nounwind readnone
diff --git a/llvm/test/CodeGen/RISCV/lrint-conv.ll b/llvm/test/CodeGen/RISCV/lrint-conv.ll
new file mode 100644
index 0000000000000..2967867551861
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/lrint-conv.ll
@@ -0,0 +1,297 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=riscv32 | FileCheck %s --check-prefixes=RV32-I32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=riscv32 | FileCheck %s --check-prefixes=RV32-I64
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=riscv64 | FileCheck %s --check-prefixes=RV64-I32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=riscv64 | FileCheck %s --check-prefixes=RV64-I64
+
+; FIXME: crash
+; define ITy @test_lrint_ixx_f16(half %x) #0 {
+; %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+; define ITy @test_llrint_ixx_f16(half %x) #0 {
+; %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+define ITy @test_lrint_ixx_f32(float %x) #0 {
+; RV32-I32-LABEL: test_lrint_ixx_f32:
+; RV32-I32: # %bb.0:
+; RV32-I32-NEXT: addi sp, sp, -16
+; RV32-I32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-I32-NEXT: call lrintf
+; RV32-I32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-I32-NEXT: addi sp, sp, 16
+; RV32-I32-NEXT: ret
+;
+; RV32-I64-LABEL: test_lrint_ixx_f32:
+; RV32-I64: # %bb.0:
+; RV32-I64-NEXT: addi sp, sp, -16
+; RV32-I64-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-I64-NEXT: call lrintf
+; RV32-I64-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-I64-NEXT: addi sp, sp, 16
+; RV32-I64-NEXT: ret
+;
+; RV64-I32-LABEL: test_lrint_ixx_f32:
+; RV64-I32: # %bb.0:
+; RV64-I32-NEXT: addi sp, sp, -16
+; RV64-I32-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I32-NEXT: call lrintf
+; RV64-I32-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I32-NEXT: addi sp, sp, 16
+; RV64-I32-NEXT: ret
+;
+; RV64-I64-LABEL: test_lrint_ixx_f32:
+; RV64-I64: # %bb.0:
+; RV64-I64-NEXT: addi sp, sp, -16
+; RV64-I64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I64-NEXT: call lrintf
+; RV64-I64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I64-NEXT: addi sp, sp, 16
+; RV64-I64-NEXT: ret
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) #0 {
+; RV32-I32-LABEL: test_llrint_ixx_f32:
+; RV32-I32: # %bb.0:
+; RV32-I32-NEXT: addi sp, sp, -16
+; RV32-I32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-I32-NEXT: call llrintf
+; RV32-I32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-I32-NEXT: addi sp, sp, 16
+; RV32-I32-NEXT: ret
+;
+; RV32-I64-LABEL: test_llrint_ixx_f32:
+; RV32-I64: # %bb.0:
+; RV32-I64-NEXT: addi sp, sp, -16
+; RV32-I64-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-I64-NEXT: call llrintf
+; RV32-I64-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-I64-NEXT: addi sp, sp, 16
+; RV32-I64-NEXT: ret
+;
+; RV64-I32-LABEL: test_llrint_ixx_f32:
+; RV64-I32: # %bb.0:
+; RV64-I32-NEXT: addi sp, sp, -16
+; RV64-I32-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I32-NEXT: call llrintf
+; RV64-I32-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I32-NEXT: addi sp, sp, 16
+; RV64-I32-NEXT: ret
+;
+; RV64-I64-LABEL: test_llrint_ixx_f32:
+; RV64-I64: # %bb.0:
+; RV64-I64-NEXT: addi sp, sp, -16
+; RV64-I64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I64-NEXT: call llrintf
+; RV64-I64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I64-NEXT: addi sp, sp, 16
+; RV64-I64-NEXT: ret
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) #0 {
+; RV32-I32-LABEL: test_lrint_ixx_f64:
+; RV32-I32: # %bb.0:
+; RV32-I32-NEXT: addi sp, sp, -16
+; RV32-I32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-I32-NEXT: call lrint
+; RV32-I32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-I32-NEXT: addi sp, sp, 16
+; RV32-I32-NEXT: ret
+;
+; RV32-I64-LABEL: test_lrint_ixx_f64:
+; RV32-I64: # %bb.0:
+; RV32-I64-NEXT: addi sp, sp, -16
+; RV32-I64-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-I64-NEXT: call lrint
+; RV32-I64-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-I64-NEXT: addi sp, sp, 16
+; RV32-I64-NEXT: ret
+;
+; RV64-I32-LABEL: test_lrint_ixx_f64:
+; RV64-I32: # %bb.0:
+; RV64-I32-NEXT: addi sp, sp, -16
+; RV64-I32-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I32-NEXT: call lrint
+; RV64-I32-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I32-NEXT: addi sp, sp, 16
+; RV64-I32-NEXT: ret
+;
+; RV64-I64-LABEL: test_lrint_ixx_f64:
+; RV64-I64: # %bb.0:
+; RV64-I64-NEXT: addi sp, sp, -16
+; RV64-I64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I64-NEXT: call lrint
+; RV64-I64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I64-NEXT: addi sp, sp, 16
+; RV64-I64-NEXT: ret
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) #0 {
+; RV32-I32-LABEL: test_llrint_ixx_f64:
+; RV32-I32: # %bb.0:
+; RV32-I32-NEXT: addi sp, sp, -16
+; RV32-I32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-I32-NEXT: call llrint
+; RV32-I32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-I32-NEXT: addi sp, sp, 16
+; RV32-I32-NEXT: ret
+;
+; RV32-I64-LABEL: test_llrint_ixx_f64:
+; RV32-I64: # %bb.0:
+; RV32-I64-NEXT: addi sp, sp, -16
+; RV32-I64-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-I64-NEXT: call llrint
+; RV32-I64-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-I64-NEXT: addi sp, sp, 16
+; RV32-I64-NEXT: ret
+;
+; RV64-I32-LABEL: test_llrint_ixx_f64:
+; RV64-I32: # %bb.0:
+; RV64-I32-NEXT: addi sp, sp, -16
+; RV64-I32-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I32-NEXT: call llrint
+; RV64-I32-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I32-NEXT: addi sp, sp, 16
+; RV64-I32-NEXT: ret
+;
+; RV64-I64-LABEL: test_llrint_ixx_f64:
+; RV64-I64: # %bb.0:
+; RV64-I64-NEXT: addi sp, sp, -16
+; RV64-I64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I64-NEXT: call llrint
+; RV64-I64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I64-NEXT: addi sp, sp, 16
+; RV64-I64-NEXT: ret
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f128(fp128 %x) #0 {
+; RV32-I32-LABEL: test_lrint_ixx_f128:
+; RV32-I32: # %bb.0:
+; RV32-I32-NEXT: addi sp, sp, -32
+; RV32-I32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-I32-NEXT: lw a1, 0(a0)
+; RV32-I32-NEXT: lw a2, 4(a0)
+; RV32-I32-NEXT: lw a3, 8(a0)
+; RV32-I32-NEXT: lw a4, 12(a0)
+; RV32-I32-NEXT: addi a0, sp, 8
+; RV32-I32-NEXT: sw a1, 8(sp)
+; RV32-I32-NEXT: sw a2, 12(sp)
+; RV32-I32-NEXT: sw a3, 16(sp)
+; RV32-I32-NEXT: sw a4, 20(sp)
+; RV32-I32-NEXT: call lrintl
+; RV32-I32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-I32-NEXT: addi sp, sp, 32
+; RV32-I32-NEXT: ret
+;
+; RV32-I64-LABEL: test_lrint_ixx_f128:
+; RV32-I64: # %bb.0:
+; RV32-I64-NEXT: addi sp, sp, -32
+; RV32-I64-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-I64-NEXT: lw a1, 0(a0)
+; RV32-I64-NEXT: lw a2, 4(a0)
+; RV32-I64-NEXT: lw a3, 8(a0)
+; RV32-I64-NEXT: lw a4, 12(a0)
+; RV32-I64-NEXT: mv a0, sp
+; RV32-I64-NEXT: sw a1, 0(sp)
+; RV32-I64-NEXT: sw a2, 4(sp)
+; RV32-I64-NEXT: sw a3, 8(sp)
+; RV32-I64-NEXT: sw a4, 12(sp)
+; RV32-I64-NEXT: call lrintl
+; RV32-I64-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-I64-NEXT: addi sp, sp, 32
+; RV32-I64-NEXT: ret
+;
+; RV64-I32-LABEL: test_lrint_ixx_f128:
+; RV64-I32: # %bb.0:
+; RV64-I32-NEXT: addi sp, sp, -16
+; RV64-I32-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I32-NEXT: call lrintl
+; RV64-I32-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I32-NEXT: addi sp, sp, 16
+; RV64-I32-NEXT: ret
+;
+; RV64-I64-LABEL: test_lrint_ixx_f128:
+; RV64-I64: # %bb.0:
+; RV64-I64-NEXT: addi sp, sp, -16
+; RV64-I64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I64-NEXT: call lrintl
+; RV64-I64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I64-NEXT: addi sp, sp, 16
+; RV64-I64-NEXT: ret
+ %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f128(fp128 %x) #0 {
+; RV32-I32-LABEL: test_llrint_ixx_f128:
+; RV32-I32: # %bb.0:
+; RV32-I32-NEXT: addi sp, sp, -32
+; RV32-I32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-I32-NEXT: lw a1, 0(a0)
+; RV32-I32-NEXT: lw a2, 4(a0)
+; RV32-I32-NEXT: lw a3, 8(a0)
+; RV32-I32-NEXT: lw a4, 12(a0)
+; RV32-I32-NEXT: addi a0, sp, 8
+; RV32-I32-NEXT: sw a1, 8(sp)
+; RV32-I32-NEXT: sw a2, 12(sp)
+; RV32-I32-NEXT: sw a3, 16(sp)
+; RV32-I32-NEXT: sw a4, 20(sp)
+; RV32-I32-NEXT: call llrintl
+; RV32-I32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-I32-NEXT: addi sp, sp, 32
+; RV32-I32-NEXT: ret
+;
+; RV32-I64-LABEL: test_llrint_ixx_f128:
+; RV32-I64: # %bb.0:
+; RV32-I64-NEXT: addi sp, sp, -32
+; RV32-I64-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-I64-NEXT: lw a1, 0(a0)
+; RV32-I64-NEXT: lw a2, 4(a0)
+; RV32-I64-NEXT: lw a3, 8(a0)
+; RV32-I64-NEXT: lw a4, 12(a0)
+; RV32-I64-NEXT: mv a0, sp
+; RV32-I64-NEXT: sw a1, 0(sp)
+; RV32-I64-NEXT: sw a2, 4(sp)
+; RV32-I64-NEXT: sw a3, 8(sp)
+; RV32-I64-NEXT: sw a4, 12(sp)
+; RV32-I64-NEXT: call llrintl
+; RV32-I64-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-I64-NEXT: addi sp, sp, 32
+; RV32-I64-NEXT: ret
+;
+; RV64-I32-LABEL: test_llrint_ixx_f128:
+; RV64-I32: # %bb.0:
+; RV64-I32-NEXT: addi sp, sp, -16
+; RV64-I32-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I32-NEXT: call llrintl
+; RV64-I32-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I32-NEXT: addi sp, sp, 16
+; RV64-I32-NEXT: ret
+;
+; RV64-I64-LABEL: test_llrint_ixx_f128:
+; RV64-I64: # %bb.0:
+; RV64-I64-NEXT: addi sp, sp, -16
+; RV64-I64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-I64-NEXT: call llrintl
+; RV64-I64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-I64-NEXT: addi sp, sp, 16
+; RV64-I64-NEXT: ret
+ %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/SPARC/lrint-conv.ll b/llvm/test/CodeGen/SPARC/lrint-conv.ll
new file mode 100644
index 0000000000000..f18c83fe78e5c
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/lrint-conv.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=sparc | FileCheck %s --check-prefixes=S32-I32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=sparc | FileCheck %s --check-prefixes=S32-I64
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=sparc64 | FileCheck %s --check-prefixes=S64-I32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=sparc64 | FileCheck %s --check-prefixes=S64-I64
+
+; FIXME: crash
+; define ITy @test_lrint_ixx_f16(half %x) #0 {
+; %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+; define ITy @test_llrint_ixx_f16(half %x) #0 {
+; %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+; ret ITy %res
+; }
+
+define ITy @test_lrint_ixx_f32(float %x) #0 {
+; S32-I32-LABEL: test_lrint_ixx_f32:
+; S32-I32: ! %bb.0:
+; S32-I32-NEXT: save %sp, -96, %sp
+; S32-I32-NEXT: call lrintf
+; S32-I32-NEXT: mov %i0, %o0
+; S32-I32-NEXT: ret
+; S32-I32-NEXT: restore %g0, %o0, %o0
+;
+; S32-I64-LABEL: test_lrint_ixx_f32:
+; S32-I64: ! %bb.0:
+; S32-I64-NEXT: save %sp, -96, %sp
+; S32-I64-NEXT: call lrintf
+; S32-I64-NEXT: mov %i0, %o0
+; S32-I64-NEXT: mov %o0, %i0
+; S32-I64-NEXT: ret
+; S32-I64-NEXT: restore %g0, %o1, %o1
+;
+; S64-I32-LABEL: test_lrint_ixx_f32:
+; S64-I32: ! %bb.0:
+; S64-I32-NEXT: save %sp, -176, %sp
+; S64-I32-NEXT: call lrintf
+; S64-I32-NEXT: nop
+; S64-I32-NEXT: ret
+; S64-I32-NEXT: restore %g0, %o0, %o0
+;
+; S64-I64-LABEL: test_lrint_ixx_f32:
+; S64-I64: ! %bb.0:
+; S64-I64-NEXT: save %sp, -176, %sp
+; S64-I64-NEXT: call lrintf
+; S64-I64-NEXT: nop
+; S64-I64-NEXT: ret
+; S64-I64-NEXT: restore %g0, %o0, %o0
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) #0 {
+; S32-I32-LABEL: test_llrint_ixx_f32:
+; S32-I32: ! %bb.0:
+; S32-I32-NEXT: save %sp, -96, %sp
+; S32-I32-NEXT: call llrintf
+; S32-I32-NEXT: mov %i0, %o0
+; S32-I32-NEXT: ret
+; S32-I32-NEXT: restore %g0, %o0, %o0
+;
+; S32-I64-LABEL: test_llrint_ixx_f32:
+; S32-I64: ! %bb.0:
+; S32-I64-NEXT: save %sp, -96, %sp
+; S32-I64-NEXT: call llrintf
+; S32-I64-NEXT: mov %i0, %o0
+; S32-I64-NEXT: mov %o0, %i0
+; S32-I64-NEXT: ret
+; S32-I64-NEXT: restore %g0, %o1, %o1
+;
+; S64-I32-LABEL: test_llrint_ixx_f32:
+; S64-I32: ! %bb.0:
+; S64-I32-NEXT: save %sp, -176, %sp
+; S64-I32-NEXT: call llrintf
+; S64-I32-NEXT: nop
+; S64-I32-NEXT: ret
+; S64-I32-NEXT: restore %g0, %o0, %o0
+;
+; S64-I64-LABEL: test_llrint_ixx_f32:
+; S64-I64: ! %bb.0:
+; S64-I64-NEXT: save %sp, -176, %sp
+; S64-I64-NEXT: call llrintf
+; S64-I64-NEXT: nop
+; S64-I64-NEXT: ret
+; S64-I64-NEXT: restore %g0, %o0, %o0
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) #0 {
+; S32-I32-LABEL: test_lrint_ixx_f64:
+; S32-I32: ! %bb.0:
+; S32-I32-NEXT: save %sp, -112, %sp
+; S32-I32-NEXT: ! kill: def $i1 killed $i1 killed $i0_i1 def $i0_i1
+; S32-I32-NEXT: ! kill: def $i0 killed $i0 killed $i0_i1 def $i0_i1
+; S32-I32-NEXT: std %i0, [%fp+-8]
+; S32-I32-NEXT: ldd [%fp+-8], %f0
+; S32-I32-NEXT: std %f0, [%fp+-16]
+; S32-I32-NEXT: call lrint
+; S32-I32-NEXT: ldd [%fp+-16], %o0
+; S32-I32-NEXT: ret
+; S32-I32-NEXT: restore %g0, %o0, %o0
+;
+; S32-I64-LABEL: test_lrint_ixx_f64:
+; S32-I64: ! %bb.0:
+; S32-I64-NEXT: save %sp, -104, %sp
+; S32-I64-NEXT: mov %i1, %o1
+; S32-I64-NEXT: ! kill: def $i0 killed $i0 def $i0_i1
+; S32-I64-NEXT: call lrint
+; S32-I64-NEXT: mov %i0, %o0
+; S32-I64-NEXT: mov %o0, %i0
+; S32-I64-NEXT: ret
+; S32-I64-NEXT: restore %g0, %o1, %o1
+;
+; S64-I32-LABEL: test_lrint_ixx_f64:
+; S64-I32: ! %bb.0:
+; S64-I32-NEXT: save %sp, -176, %sp
+; S64-I32-NEXT: call lrint
+; S64-I32-NEXT: nop
+; S64-I32-NEXT: ret
+; S64-I32-NEXT: restore %g0, %o0, %o0
+;
+; S64-I64-LABEL: test_lrint_ixx_f64:
+; S64-I64: ! %bb.0:
+; S64-I64-NEXT: save %sp, -176, %sp
+; S64-I64-NEXT: call lrint
+; S64-I64-NEXT: nop
+; S64-I64-NEXT: ret
+; S64-I64-NEXT: restore %g0, %o0, %o0
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) #0 {
+; S32-I32-LABEL: test_llrint_ixx_f64:
+; S32-I32: ! %bb.0:
+; S32-I32-NEXT: save %sp, -112, %sp
+; S32-I32-NEXT: ! kill: def $i1 killed $i1 killed $i0_i1 def $i0_i1
+; S32-I32-NEXT: ! kill: def $i0 killed $i0 killed $i0_i1 def $i0_i1
+; S32-I32-NEXT: std %i0, [%fp+-8]
+; S32-I32-NEXT: ldd [%fp+-8], %f0
+; S32-I32-NEXT: std %f0, [%fp+-16]
+; S32-I32-NEXT: call llrint
+; S32-I32-NEXT: ldd [%fp+-16], %o0
+; S32-I32-NEXT: ret
+; S32-I32-NEXT: restore %g0, %o0, %o0
+;
+; S32-I64-LABEL: test_llrint_ixx_f64:
+; S32-I64: ! %bb.0:
+; S32-I64-NEXT: save %sp, -104, %sp
+; S32-I64-NEXT: mov %i1, %o1
+; S32-I64-NEXT: ! kill: def $i0 killed $i0 def $i0_i1
+; S32-I64-NEXT: call llrint
+; S32-I64-NEXT: mov %i0, %o0
+; S32-I64-NEXT: mov %o0, %i0
+; S32-I64-NEXT: ret
+; S32-I64-NEXT: restore %g0, %o1, %o1
+;
+; S64-I32-LABEL: test_llrint_ixx_f64:
+; S64-I32: ! %bb.0:
+; S64-I32-NEXT: save %sp, -176, %sp
+; S64-I32-NEXT: call llrint
+; S64-I32-NEXT: nop
+; S64-I32-NEXT: ret
+; S64-I32-NEXT: restore %g0, %o0, %o0
+;
+; S64-I64-LABEL: test_llrint_ixx_f64:
+; S64-I64: ! %bb.0:
+; S64-I64-NEXT: save %sp, -176, %sp
+; S64-I64-NEXT: call llrint
+; S64-I64-NEXT: nop
+; S64-I64-NEXT: ret
+; S64-I64-NEXT: restore %g0, %o0, %o0
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+; FIXME(#41838): unsupported type
+; define ITy @test_lrint_ixx_f128(fp128 %x) #0 {
+; %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+; ret ITy %res
+; }
+
+; define ITy @test_llrint_ixx_f128(fp128 %x) #0 {
+; %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+; ret ITy %res
+; }
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/WebAssembly/lrint-conv.ll b/llvm/test/CodeGen/WebAssembly/lrint-conv.ll
new file mode 100644
index 0000000000000..0a886ed13974d
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/lrint-conv.ll
@@ -0,0 +1,164 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; Tests for lrint and llrint, with both i32 and i64 checked.
+
+; RUN: sed 's/ITy/i32/g' %s | llc -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=CHECK-I32
+; RUN: sed 's/ITy/i64/g' %s | llc -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=CHECK-I64
+
+define ITy @test_lrint_ixx_f16(half %x) #0 {
+; CHECK-I32-LABEL: test_lrint_ixx_f16:
+; CHECK-I32: .functype test_lrint_ixx_f16 (f32) -> (i32)
+; CHECK-I32-NEXT: # %bb.0:
+; CHECK-I32-NEXT: local.get 0
+; CHECK-I32-NEXT: call __truncsfhf2
+; CHECK-I32-NEXT: call __extendhfsf2
+; CHECK-I32-NEXT: call lrintf
+; CHECK-I32-NEXT: # fallthrough-return
+;
+; CHECK-I64-LABEL: test_lrint_ixx_f16:
+; CHECK-I64: .functype test_lrint_ixx_f16 (f32) -> (i64)
+; CHECK-I64-NEXT: # %bb.0:
+; CHECK-I64-NEXT: local.get 0
+; CHECK-I64-NEXT: call __truncsfhf2
+; CHECK-I64-NEXT: call __extendhfsf2
+; CHECK-I64-NEXT: call lrintf
+; CHECK-I64-NEXT: # fallthrough-return
+ %res = tail call ITy @llvm.lrint.ITy.f16(half %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f16(half %x) #0 {
+; CHECK-I32-LABEL: test_llrint_ixx_f16:
+; CHECK-I32: .functype test_llrint_ixx_f16 (f32) -> (i32)
+; CHECK-I32-NEXT: # %bb.0:
+; CHECK-I32-NEXT: local.get 0
+; CHECK-I32-NEXT: call __truncsfhf2
+; CHECK-I32-NEXT: call __extendhfsf2
+; CHECK-I32-NEXT: call llrintf
+; CHECK-I32-NEXT: # fallthrough-return
+;
+; CHECK-I64-LABEL: test_llrint_ixx_f16:
+; CHECK-I64: .functype test_llrint_ixx_f16 (f32) -> (i64)
+; CHECK-I64-NEXT: # %bb.0:
+; CHECK-I64-NEXT: local.get 0
+; CHECK-I64-NEXT: call __truncsfhf2
+; CHECK-I64-NEXT: call __extendhfsf2
+; CHECK-I64-NEXT: call llrintf
+; CHECK-I64-NEXT: # fallthrough-return
+ %res = tail call ITy @llvm.llrint.ITy.f16(half %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f32(float %x) #0 {
+; CHECK-I32-LABEL: test_lrint_ixx_f32:
+; CHECK-I32: .functype test_lrint_ixx_f32 (f32) -> (i32)
+; CHECK-I32-NEXT: # %bb.0:
+; CHECK-I32-NEXT: local.get 0
+; CHECK-I32-NEXT: call lrintf
+; CHECK-I32-NEXT: # fallthrough-return
+;
+; CHECK-I64-LABEL: test_lrint_ixx_f32:
+; CHECK-I64: .functype test_lrint_ixx_f32 (f32) -> (i64)
+; CHECK-I64-NEXT: # %bb.0:
+; CHECK-I64-NEXT: local.get 0
+; CHECK-I64-NEXT: call lrintf
+; CHECK-I64-NEXT: # fallthrough-return
+ %res = tail call ITy @llvm.lrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f32(float %x) #0 {
+; CHECK-I32-LABEL: test_llrint_ixx_f32:
+; CHECK-I32: .functype test_llrint_ixx_f32 (f32) -> (i32)
+; CHECK-I32-NEXT: # %bb.0:
+; CHECK-I32-NEXT: local.get 0
+; CHECK-I32-NEXT: call llrintf
+; CHECK-I32-NEXT: # fallthrough-return
+;
+; CHECK-I64-LABEL: test_llrint_ixx_f32:
+; CHECK-I64: .functype test_llrint_ixx_f32 (f32) -> (i64)
+; CHECK-I64-NEXT: # %bb.0:
+; CHECK-I64-NEXT: local.get 0
+; CHECK-I64-NEXT: call llrintf
+; CHECK-I64-NEXT: # fallthrough-return
+ %res = tail call ITy @llvm.llrint.ITy.f32(float %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f64(double %x) #0 {
+; CHECK-I32-LABEL: test_lrint_ixx_f64:
+; CHECK-I32: .functype test_lrint_ixx_f64 (f64) -> (i32)
+; CHECK-I32-NEXT: # %bb.0:
+; CHECK-I32-NEXT: local.get 0
+; CHECK-I32-NEXT: call lrint
+; CHECK-I32-NEXT: # fallthrough-return
+;
+; CHECK-I64-LABEL: test_lrint_ixx_f64:
+; CHECK-I64: .functype test_lrint_ixx_f64 (f64) -> (i64)
+; CHECK-I64-NEXT: # %bb.0:
+; CHECK-I64-NEXT: local.get 0
+; CHECK-I64-NEXT: call lrint
+; CHECK-I64-NEXT: # fallthrough-return
+ %res = tail call ITy @llvm.lrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f64(double %x) #0 {
+; CHECK-I32-LABEL: test_llrint_ixx_f64:
+; CHECK-I32: .functype test_llrint_ixx_f64 (f64) -> (i32)
+; CHECK-I32-NEXT: # %bb.0:
+; CHECK-I32-NEXT: local.get 0
+; CHECK-I32-NEXT: call llrint
+; CHECK-I32-NEXT: # fallthrough-return
+;
+; CHECK-I64-LABEL: test_llrint_ixx_f64:
+; CHECK-I64: .functype test_llrint_ixx_f64 (f64) -> (i64)
+; CHECK-I64-NEXT: # %bb.0:
+; CHECK-I64-NEXT: local.get 0
+; CHECK-I64-NEXT: call llrint
+; CHECK-I64-NEXT: # fallthrough-return
+ %res = tail call ITy @llvm.llrint.ITy.f64(double %x)
+ ret ITy %res
+}
+
+define ITy @test_lrint_ixx_f128(fp128 %x) #0 {
+; CHECK-I32-LABEL: test_lrint_ixx_f128:
+; CHECK-I32: .functype test_lrint_ixx_f128 (i64, i64) -> (i32)
+; CHECK-I32-NEXT: # %bb.0:
+; CHECK-I32-NEXT: local.get 0
+; CHECK-I32-NEXT: local.get 1
+; CHECK-I32-NEXT: call lrintl
+; CHECK-I32-NEXT: # fallthrough-return
+;
+; CHECK-I64-LABEL: test_lrint_ixx_f128:
+; CHECK-I64: .functype test_lrint_ixx_f128 (i64, i64) -> (i64)
+; CHECK-I64-NEXT: # %bb.0:
+; CHECK-I64-NEXT: local.get 0
+; CHECK-I64-NEXT: local.get 1
+; CHECK-I64-NEXT: call lrintl
+; CHECK-I64-NEXT: # fallthrough-return
+ %res = tail call ITy @llvm.lrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+define ITy @test_llrint_ixx_f128(fp128 %x) #0 {
+; CHECK-I32-LABEL: test_llrint_ixx_f128:
+; CHECK-I32: .functype test_llrint_ixx_f128 (i64, i64) -> (i32)
+; CHECK-I32-NEXT: # %bb.0:
+; CHECK-I32-NEXT: local.get 0
+; CHECK-I32-NEXT: local.get 1
+; CHECK-I32-NEXT: call llrintl
+; CHECK-I32-NEXT: # fallthrough-return
+;
+; CHECK-I64-LABEL: test_llrint_ixx_f128:
+; CHECK-I64: .functype test_llrint_ixx_f128 (i64, i64) -> (i64)
+; CHECK-I64-NEXT: # %bb.0:
+; CHECK-I64-NEXT: local.get 0
+; CHECK-I64-NEXT: local.get 1
+; CHECK-I64-NEXT: call llrintl
+; CHECK-I64-NEXT: # fallthrough-return
+ %res = tail call ITy @llvm.llrint.ITy.f128(fp128 %x)
+ ret ITy %res
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/X86/llrint-conv.ll b/llvm/test/CodeGen/X86/llrint-conv.ll
index 402daf80a15e8..b262e94a6825a 100644
--- a/llvm/test/CodeGen/X86/llrint-conv.ll
+++ b/llvm/test/CodeGen/X86/llrint-conv.ll
@@ -7,14 +7,50 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=X64,X64-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=X64,X64-AVX
-define i64 @testmsxs(float %x) {
+define i64 @testmsxh(half %x) #0 {
+; X86-NOSSE-LABEL: testmsxh:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: pushl %eax
+; X86-NOSSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: calll __extendhfsf2
+; X86-NOSSE-NEXT: fstps (%esp)
+; X86-NOSSE-NEXT: calll llrintf
+; X86-NOSSE-NEXT: popl %ecx
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: testmsxh:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: pinsrw $0, {{[0-9]+}}(%esp), %xmm0
+; X86-SSE2-NEXT: pextrw $0, %xmm0, %eax
+; X86-SSE2-NEXT: movw %ax, (%esp)
+; X86-SSE2-NEXT: calll __extendhfsf2
+; X86-SSE2-NEXT: fstps (%esp)
+; X86-SSE2-NEXT: calll llrintf
+; X86-SSE2-NEXT: popl %ecx
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE-LABEL: testmsxh:
+; X64-SSE: # %bb.0: # %entry
+; X64-SSE-NEXT: pushq %rax
+; X64-SSE-NEXT: callq __extendhfsf2 at PLT
+; X64-SSE-NEXT: callq rintf at PLT
+; X64-SSE-NEXT: callq __truncsfhf2 at PLT
+; X64-SSE-NEXT: callq __extendhfsf2 at PLT
+; X64-SSE-NEXT: cvttss2si %xmm0, %rax
+; X64-SSE-NEXT: popq %rcx
+; X64-SSE-NEXT: retq
+entry:
+ %0 = tail call i64 @llvm.llrint.f16(half %x)
+ ret i64 %0
+}
+
+define i64 @testmsxs(float %x) #0 {
; X86-NOSSE-LABEL: testmsxs:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT: .cfi_offset %ebp, -8
; X86-NOSSE-NEXT: movl %esp, %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa_register %ebp
; X86-NOSSE-NEXT: andl $-8, %esp
; X86-NOSSE-NEXT: subl $8, %esp
; X86-NOSSE-NEXT: flds 8(%ebp)
@@ -23,16 +59,12 @@ define i64 @testmsxs(float %x) {
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOSSE-NEXT: movl %ebp, %esp
; X86-NOSSE-NEXT: popl %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa %esp, 4
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: testmsxs:
; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: pushl %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
-; X86-SSE2-NEXT: .cfi_offset %ebp, -8
; X86-SSE2-NEXT: movl %esp, %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa_register %ebp
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -43,16 +75,12 @@ define i64 @testmsxs(float %x) {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE2-NEXT: movl %ebp, %esp
; X86-SSE2-NEXT: popl %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa %esp, 4
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: testmsxs:
; X86-AVX: # %bb.0: # %entry
; X86-AVX-NEXT: pushl %ebp
-; X86-AVX-NEXT: .cfi_def_cfa_offset 8
-; X86-AVX-NEXT: .cfi_offset %ebp, -8
; X86-AVX-NEXT: movl %esp, %ebp
-; X86-AVX-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -63,7 +91,6 @@ define i64 @testmsxs(float %x) {
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl %ebp, %esp
; X86-AVX-NEXT: popl %ebp
-; X86-AVX-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: testmsxs:
@@ -80,14 +107,11 @@ entry:
ret i64 %0
}
-define i64 @testmsxd(double %x) {
+define i64 @testmsxd(double %x) #0 {
; X86-NOSSE-LABEL: testmsxd:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT: .cfi_offset %ebp, -8
; X86-NOSSE-NEXT: movl %esp, %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa_register %ebp
; X86-NOSSE-NEXT: andl $-8, %esp
; X86-NOSSE-NEXT: subl $8, %esp
; X86-NOSSE-NEXT: fldl 8(%ebp)
@@ -96,16 +120,12 @@ define i64 @testmsxd(double %x) {
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOSSE-NEXT: movl %ebp, %esp
; X86-NOSSE-NEXT: popl %ebp
-; X86-NOSSE-NEXT: .cfi_def_cfa %esp, 4
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: testmsxd:
; X86-SSE2: # %bb.0: # %entry
; X86-SSE2-NEXT: pushl %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
-; X86-SSE2-NEXT: .cfi_offset %ebp, -8
; X86-SSE2-NEXT: movl %esp, %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa_register %ebp
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -116,16 +136,12 @@ define i64 @testmsxd(double %x) {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE2-NEXT: movl %ebp, %esp
; X86-SSE2-NEXT: popl %ebp
-; X86-SSE2-NEXT: .cfi_def_cfa %esp, 4
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: testmsxd:
; X86-AVX: # %bb.0: # %entry
; X86-AVX-NEXT: pushl %ebp
-; X86-AVX-NEXT: .cfi_def_cfa_offset 8
-; X86-AVX-NEXT: .cfi_offset %ebp, -8
; X86-AVX-NEXT: movl %esp, %ebp
-; X86-AVX-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -136,7 +152,6 @@ define i64 @testmsxd(double %x) {
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-AVX-NEXT: movl %ebp, %esp
; X86-AVX-NEXT: popl %ebp
-; X86-AVX-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: testmsxd:
@@ -153,14 +168,11 @@ entry:
ret i64 %0
}
-define i64 @testmsll(x86_fp80 %x) {
+define i64 @testmsll(x86_fp80 %x) #0 {
; X86-LABEL: testmsll:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: .cfi_offset %ebp, -8
; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: .cfi_def_cfa_register %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
; X86-NEXT: fldt 8(%ebp)
@@ -169,7 +181,6 @@ define i64 @testmsll(x86_fp80 %x) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
-; X86-NEXT: .cfi_def_cfa %esp, 4
; X86-NEXT: retl
;
; X64-LABEL: testmsll:
@@ -183,6 +194,62 @@ entry:
ret i64 %0
}
+define i64 @testmslq(fp128 %x) #0 {
+; X86-NOSSE-LABEL: testmslq:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-16, %esp
+; X86-NOSSE-NEXT: subl $16, %esp
+; X86-NOSSE-NEXT: pushl 20(%ebp)
+; X86-NOSSE-NEXT: pushl 16(%ebp)
+; X86-NOSSE-NEXT: pushl 12(%ebp)
+; X86-NOSSE-NEXT: pushl 8(%ebp)
+; X86-NOSSE-NEXT: calll llrintl
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: testmslq:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-16, %esp
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: pushl 20(%ebp)
+; X86-SSE2-NEXT: pushl 16(%ebp)
+; X86-SSE2-NEXT: pushl 12(%ebp)
+; X86-SSE2-NEXT: pushl 8(%ebp)
+; X86-SSE2-NEXT: calll llrintl
+; X86-SSE2-NEXT: addl $16, %esp
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: testmslq:
+; X86-AVX: # %bb.0: # %entry
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-16, %esp
+; X86-AVX-NEXT: subl $32, %esp
+; X86-AVX-NEXT: vmovups 8(%ebp), %xmm0
+; X86-AVX-NEXT: vmovups %xmm0, (%esp)
+; X86-AVX-NEXT: calll llrintl
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: testmslq:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp llrintl at PLT # TAILCALL
+entry:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %x)
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.f32(float) nounwind readnone
declare i64 @llvm.llrint.f64(double) nounwind readnone
declare i64 @llvm.llrint.f80(x86_fp80) nounwind readnone
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/X86/lrint-conv-i32.ll b/llvm/test/CodeGen/X86/lrint-conv-i32.ll
index 21580f53ec9b3..0b4f942b5502c 100644
--- a/llvm/test/CodeGen/X86/lrint-conv-i32.ll
+++ b/llvm/test/CodeGen/X86/lrint-conv-i32.ll
@@ -7,16 +7,21 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=X64,X64-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=X64,X64-AVX
-define i32 @testmsws(float %x) {
+; FIXME: crash
+; define i32 @testmswh(half %x) #0 {
+; entry:
+; %0 = tail call i32 @llvm.lrint.i32.f16(half %x)
+; ret i32 %0
+; }
+
+define i32 @testmsws(float %x) #0 {
; X86-NOSSE-LABEL: testmsws:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %eax
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
; X86-NOSSE-NEXT: flds {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fistpl (%esp)
; X86-NOSSE-NEXT: movl (%esp), %eax
; X86-NOSSE-NEXT: popl %ecx
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 4
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: testmsws:
@@ -43,16 +48,14 @@ entry:
ret i32 %0
}
-define i32 @testmswd(double %x) {
+define i32 @testmswd(double %x) #0 {
; X86-NOSSE-LABEL: testmswd:
; X86-NOSSE: # %bb.0: # %entry
; X86-NOSSE-NEXT: pushl %eax
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 8
; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NOSSE-NEXT: fistpl (%esp)
; X86-NOSSE-NEXT: movl (%esp), %eax
; X86-NOSSE-NEXT: popl %ecx
-; X86-NOSSE-NEXT: .cfi_def_cfa_offset 4
; X86-NOSSE-NEXT: retl
;
; X86-SSE2-LABEL: testmswd:
@@ -79,16 +82,14 @@ entry:
ret i32 %0
}
-define i32 @testmsll(x86_fp80 %x) {
+define i32 @testmsll(x86_fp80 %x) #0 {
; X86-LABEL: testmsll:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %eax
-; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: fldt {{[0-9]+}}(%esp)
; X86-NEXT: fistpl (%esp)
; X86-NEXT: movl (%esp), %eax
; X86-NEXT: popl %ecx
-; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
;
; X64-LABEL: testmsll:
@@ -102,6 +103,62 @@ entry:
ret i32 %0
}
+define i32 @testmswq(fp128 %x) #0 {
+; X86-NOSSE-LABEL: testmswq:
+; X86-NOSSE: # %bb.0: # %entry
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-16, %esp
+; X86-NOSSE-NEXT: subl $16, %esp
+; X86-NOSSE-NEXT: pushl 20(%ebp)
+; X86-NOSSE-NEXT: pushl 16(%ebp)
+; X86-NOSSE-NEXT: pushl 12(%ebp)
+; X86-NOSSE-NEXT: pushl 8(%ebp)
+; X86-NOSSE-NEXT: calll lrintl
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: testmswq:
+; X86-SSE2: # %bb.0: # %entry
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-16, %esp
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: pushl 20(%ebp)
+; X86-SSE2-NEXT: pushl 16(%ebp)
+; X86-SSE2-NEXT: pushl 12(%ebp)
+; X86-SSE2-NEXT: pushl 8(%ebp)
+; X86-SSE2-NEXT: calll lrintl
+; X86-SSE2-NEXT: addl $16, %esp
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: testmswq:
+; X86-AVX: # %bb.0: # %entry
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-16, %esp
+; X86-AVX-NEXT: subl $32, %esp
+; X86-AVX-NEXT: vmovups 8(%ebp), %xmm0
+; X86-AVX-NEXT: vmovups %xmm0, (%esp)
+; X86-AVX-NEXT: calll lrintl
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-LABEL: testmswq:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp lrintl at PLT # TAILCALL
+entry:
+ %0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
+ ret i32 %0
+}
+
declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
declare i32 @llvm.lrint.i32.f64(double) nounwind readnone
declare i32 @llvm.lrint.i32.f80(x86_fp80) nounwind readnone
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/X86/lrint-conv-i64.ll b/llvm/test/CodeGen/X86/lrint-conv-i64.ll
index 38fa09085e189..176733a805c10 100644
--- a/llvm/test/CodeGen/X86/lrint-conv-i64.ll
+++ b/llvm/test/CodeGen/X86/lrint-conv-i64.ll
@@ -3,7 +3,23 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefixes=CHECK,AVX
-define i64 @testmsxs(float %x) {
+define i64 @testmsxh(half %x) #0 {
+; SSE-LABEL: testmsxh:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushq %rax
+; SSE-NEXT: callq __extendhfsf2 at PLT
+; SSE-NEXT: callq rintf at PLT
+; SSE-NEXT: callq __truncsfhf2 at PLT
+; SSE-NEXT: callq __extendhfsf2 at PLT
+; SSE-NEXT: cvttss2si %xmm0, %rax
+; SSE-NEXT: popq %rcx
+; SSE-NEXT: retq
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f16(half %x)
+ ret i64 %0
+}
+
+define i64 @testmsxs(float %x) #0 {
; SSE-LABEL: testmsxs:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtss2si %xmm0, %rax
@@ -18,7 +34,7 @@ entry:
ret i64 %0
}
-define i64 @testmsxd(double %x) {
+define i64 @testmsxd(double %x) #0 {
; SSE-LABEL: testmsxd:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtsd2si %xmm0, %rax
@@ -33,7 +49,7 @@ entry:
ret i64 %0
}
-define i64 @testmsll(x86_fp80 %x) {
+define i64 @testmsll(x86_fp80 %x) #0 {
; CHECK-LABEL: testmsll:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
@@ -45,7 +61,16 @@ entry:
ret i64 %0
}
-define i32 @PR125324(float %x) {
+define i64 @testmsxq(fp128 %x) #0 {
+; CHECK-LABEL: testmsxq:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: jmp lrintl at PLT # TAILCALL
+entry:
+ %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %x)
+ ret i64 %0
+}
+
+define i32 @PR125324(float %x) #0 {
; SSE-LABEL: PR125324:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtss2si %xmm0, %rax
@@ -66,3 +91,5 @@ entry:
declare i64 @llvm.lrint.i64.f32(float) nounwind readnone
declare i64 @llvm.lrint.i64.f64(double) nounwind readnone
declare i64 @llvm.lrint.i64.f80(x86_fp80) nounwind readnone
+
+attributes #0 = { nounwind }