[llvm] [AArch64] Add test coverage for roundeven + float-to-int conversions (PR #173515)

via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 24 16:54:11 PST 2025


llvmbot wrote:



@llvm/pr-subscribers-backend-aarch64

Author: None (valadaptive)


Split off from https://github.com/llvm/llvm-project/pull/170018, which was becoming a bit of a rat's nest.

This PR:
- Cleans up the `arm64-cvt-simd-fptoi.ll`, `round-conv.ll`, `round-fptosi-sat-scalar.ll`, and `round-fptoui-sat-scalar.ll` regression tests, removing the `nounwind readnone` annotations from the rounding function calls. Those calls were changed in #171288 to use LLVM intrinsics instead of libcalls, so we no longer need the annotations for them to compile to native instructions (a minimal sketch follows after this list). I've also removed the declaration blocks at the end of the files, since they're no longer necessary.

- Adds `roundeven` tests to the 4 regression tests mentioned above, to go along with the other rounding functions currently tested.

- Changes `round-conv.ll` to have autogenerated CHECK lines/assertions, so it's simpler to update.

- Also checks with GlobalISel in `round-conv.ll`, `round-fptosi-sat-scalar.ll`, and `round-fptoui-sat-scalar.ll`. The generated code is identical in `round-conv.ll`, but not yet as good in the latter two.

The supporting changes are needed here; splitting them off would be difficult for little gain.
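
To make the first item concrete, here is a minimal sketch of the cleaned-up test style (the function name is hypothetical; the actual cases are in the diff below). The rounding call is a plain intrinsic call, with no `nounwind readnone` attributes and no trailing `declare` block:

```llvm
; Hypothetical example in the style of the cleaned-up tests.
define i32 @roundeven_to_i32(float %a) {
  ; Previously this would have been written as
  ;   %r = call float @llvm.roundeven.f32(float %a) nounwind readnone
  ; with a matching declaration at the end of the file. Now that the
  ; rounding calls use LLVM intrinsics rather than libcalls, neither
  ; the attributes nor the declaration block is needed.
  %r = call float @llvm.roundeven.f32(float %a)
  %i = fptosi float %r to i32
  ret i32 %i
}
```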

---

Patch is 75.28 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/173515.diff


4 Files Affected:

- (modified) llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll (+388-16) 
- (modified) llvm/test/CodeGen/AArch64/round-conv.ll (+258-128) 
- (modified) llvm/test/CodeGen/AArch64/round-fptosi-sat-scalar.ll (+416-113) 
- (modified) llvm/test/CodeGen/AArch64/round-fptoui-sat-scalar.ll (+412-103) 


``````````diff
diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll
index a729772f2897a..d49a9795546a5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-fptoi.ll
@@ -543,6 +543,154 @@ define double @fcvtau_dd_round_simd(double %a) {
   ret double %bc
 }
 
+define double @fcvtns_ds_roundeven_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_ds_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_ds_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzs d0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = fptosi float %r to i64
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtns_sd_roundeven_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_sd_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_sd_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzs s0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = fptosi double %r to i32
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define float @fcvtns_ss_roundeven_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_ss_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs s0, s0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_ss_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = fptosi float %r to i32
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtns_dd_roundeven_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_dd_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs d0, d0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_dd_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = fptosi double %r to i64
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+
+define double @fcvtnu_ds_roundeven_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_ds_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_ds_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzu d0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = fptoui float %r to i64
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtnu_sd_roundeven_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_sd_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_sd_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzu s0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = fptoui double %r to i32
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define float @fcvtnu_ss_roundeven_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_ss_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu s0, s0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_ss_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzu s0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = fptoui float %r to i32
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtnu_dd_roundeven_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_dd_roundeven_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu d0, d0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_dd_roundeven_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzu d0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = fptoui double %r to i64
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
 
 define double @fcvtms_ds_round_simd(float %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtms_ds_round_simd:
@@ -1153,7 +1301,7 @@ define float @fcvtas_sh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtas s0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.round.f16(half %a) nounwind readnone
+  %r = call half @llvm.round.f16(half %a)
   %i = call i32 @llvm.fptosi.sat.i32.f16(half %r)
   %bc = bitcast i32 %i to float
   ret float %bc
@@ -1170,7 +1318,7 @@ define double @fcvtas_dh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtas d0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.round.f16(half %a) nounwind readnone
+  %r = call half @llvm.round.f16(half %a)
   %i = call i64 @llvm.fptosi.sat.i64.f16(half %r)
   %bc = bitcast i64 %i to double
   ret double %bc
@@ -1253,7 +1401,7 @@ define float @fcvtau_sh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtau s0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.round.f16(half %a) nounwind readnone
+  %r = call half @llvm.round.f16(half %a)
   %i = call i32 @llvm.fptoui.sat.i32.f16(half %r)
   %bc = bitcast i32 %i to float
   ret float %bc
@@ -1270,7 +1418,7 @@ define double @fcvtau_dh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtau d0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.round.f16(half %a) nounwind readnone
+  %r = call half @llvm.round.f16(half %a)
   %i = call i64 @llvm.fptoui.sat.i64.f16(half %r)
   %bc = bitcast i64 %i to double
   ret double %bc
@@ -1342,6 +1490,230 @@ define double @fcvtau_dd_simd(double %a) {
   ret double %bc
 }
 
+define float @fcvtns_sh_simd(half %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_sh_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_sh_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn h0, h0
+; CHECK-NEXT:    fcvtzs s0, h0
+; CHECK-NEXT:    ret
+  %r = call half @llvm.roundeven.f16(half %a)
+  %i = call i32 @llvm.fptosi.sat.i32.f16(half %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtns_dh_simd(half %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_dh_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_dh_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn h0, h0
+; CHECK-NEXT:    fcvtzs d0, h0
+; CHECK-NEXT:    ret
+  %r = call half @llvm.roundeven.f16(half %a)
+  %i = call i64 @llvm.fptosi.sat.i64.f16(half %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define double @fcvtns_ds_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_ds_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_ds_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzs d0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = call i64 @llvm.fptosi.sat.i64.f32(float %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtns_sd_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_sd_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_sd_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzs s0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = call i32 @llvm.fptosi.sat.i32.f64(double %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define float @fcvtns_ss_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_ss_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs s0, s0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_ss_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = call i32 @llvm.fptosi.sat.i32.f32(float %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtns_dd_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtns_dd_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs d0, d0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtns_dd_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = call i64 @llvm.fptosi.sat.i64.f64(double %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtnu_sh_simd(half %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_sh_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu w8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_sh_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn h0, h0
+; CHECK-NEXT:    fcvtzu s0, h0
+; CHECK-NEXT:    ret
+  %r = call half @llvm.roundeven.f16(half %a)
+  %i = call i32 @llvm.fptoui.sat.i32.f16(half %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtnu_dh_simd(half %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_dh_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_dh_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn h0, h0
+; CHECK-NEXT:    fcvtzu d0, h0
+; CHECK-NEXT:    ret
+  %r = call half @llvm.roundeven.f16(half %a)
+  %i = call i64 @llvm.fptoui.sat.i64.f16(half %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define double @fcvtnu_ds_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_ds_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_ds_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzu d0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = call i64 @llvm.fptoui.sat.i64.f32(float %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
+define float @fcvtnu_sd_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_sd_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_sd_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzu s0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = call i32 @llvm.fptoui.sat.i32.f64(double %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define float @fcvtnu_ss_simd(float %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_ss_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu s0, s0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_ss_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn s0, s0
+; CHECK-NEXT:    fcvtzu s0, s0
+; CHECK-NEXT:    ret
+  %r = call float @llvm.roundeven.f32(float %a)
+  %i = call i32 @llvm.fptoui.sat.i32.f32(float %r)
+  %bc = bitcast i32 %i to float
+  ret float %bc
+}
+
+define double @fcvtnu_dd_simd(double %a) {
+; CHECK-NOFPRCVT-LABEL: fcvtnu_dd_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintn d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzu d0, d0
+; CHECK-NOFPRCVT-NEXT:    ret
+;
+; CHECK-LABEL: fcvtnu_dd_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintn d0, d0
+; CHECK-NEXT:    fcvtzu d0, d0
+; CHECK-NEXT:    ret
+  %r = call double @llvm.roundeven.f64(double %a)
+  %i = call i64 @llvm.fptoui.sat.i64.f64(double %r)
+  %bc = bitcast i64 %i to double
+  ret double %bc
+}
+
 define float @fcvtms_sh_simd(half %a) {
 ; CHECK-NOFPRCVT-LABEL: fcvtms_sh_simd:
 ; CHECK-NOFPRCVT:       // %bb.0:
@@ -1353,7 +1725,7 @@ define float @fcvtms_sh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtms s0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.floor.f16(half %a) nounwind readnone
+  %r = call half @llvm.floor.f16(half %a)
   %i = call i32 @llvm.fptosi.sat.i32.f16(half %r)
   %bc = bitcast i32 %i to float
   ret float %bc
@@ -1370,7 +1742,7 @@ define double @fcvtms_dh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtms d0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.floor.f16(half %a) nounwind readnone
+  %r = call half @llvm.floor.f16(half %a)
   %i = call i64 @llvm.fptosi.sat.i64.f16(half %r)
   %bc = bitcast i64 %i to double
   ret double %bc
@@ -1453,7 +1825,7 @@ define float @fcvtmu_sh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtmu s0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.floor.f16(half %a) nounwind readnone
+  %r = call half @llvm.floor.f16(half %a)
   %i = call i32 @llvm.fptoui.sat.i32.f16(half %r)
   %bc = bitcast i32 %i to float
   ret float %bc
@@ -1470,7 +1842,7 @@ define double @fcvtmu_dh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtmu d0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.floor.f16(half %a) nounwind readnone
+  %r = call half @llvm.floor.f16(half %a)
   %i = call i64 @llvm.fptoui.sat.i64.f16(half %r)
   %bc = bitcast i64 %i to double
   ret double %bc
@@ -1553,7 +1925,7 @@ define float @fcvtps_sh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtps s0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.ceil.f16(half %a) nounwind readnone
+  %r = call half @llvm.ceil.f16(half %a)
   %i = call i32 @llvm.fptosi.sat.i32.f16(half %r)
   %bc = bitcast i32 %i to float
   ret float %bc
@@ -1570,7 +1942,7 @@ define double @fcvtps_dh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtps d0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.ceil.f16(half %a) nounwind readnone
+  %r = call half @llvm.ceil.f16(half %a)
   %i = call i64 @llvm.fptosi.sat.i64.f16(half %r)
   %bc = bitcast i64 %i to double
   ret double %bc
@@ -1653,7 +2025,7 @@ define float @fcvtpu_sh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtpu s0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.ceil.f16(half %a) nounwind readnone
+  %r = call half @llvm.ceil.f16(half %a)
   %i = call i32 @llvm.fptoui.sat.i32.f16(half %r)
   %bc = bitcast i32 %i to float
   ret float %bc
@@ -1670,7 +2042,7 @@ define double @fcvtpu_dh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtpu d0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.ceil.f16(half %a) nounwind readnone
+  %r = call half @llvm.ceil.f16(half %a)
   %i = call i64 @llvm.fptoui.sat.i64.f16(half %r)
   %bc = bitcast i64 %i to double
   ret double %bc
@@ -1753,7 +2125,7 @@ define float @fcvtzs_sh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtzs s0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.trunc.f16(half %a) nounwind readnone
+  %r = call half @llvm.trunc.f16(half %a)
   %i = call i32 @llvm.fptosi.sat.i32.f16(half %r)
   %bc = bitcast i32 %i to float
   ret float %bc
@@ -1770,7 +2142,7 @@ define double @fcvtzs_dh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtzs d0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.trunc.f16(half %a) nounwind readnone
+  %r = call half @llvm.trunc.f16(half %a)
   %i = call i64 @llvm.fptosi.sat.i64.f16(half %r)
   %bc = bitcast i64 %i to double
   ret double %bc
@@ -1853,7 +2225,7 @@ define float @fcvtzu_sh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtzu s0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.trunc.f16(half %a) nounwind readnone
+  %r = call half @llvm.trunc.f16(half %a)
   %i = call i32 @llvm.fptoui.sat.i32.f16(half %r)
   %bc = bitcast i32 %i to float
   ret float %bc
@@ -1870,7 +2242,7 @@ define double @fcvtzu_dh_simd(half %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvtzu d0, h0
 ; CHECK-NEXT:    ret
-  %r = call half @llvm.trunc.f16(half %a) nounwind readnone
+  %r = call half @llvm.trunc.f16(half %a)
   %i = call i64 @llvm.fptoui.sat.i64.f16(half %r)
   %bc = bitcast i64 %i to double
   ret double %bc
diff --git a/llvm/test/CodeGen/AArch64/round-conv.ll b/llvm/test/CodeGen/AArch64/round-conv.ll
index d78aa207925a4..55a1998589733 100644
--- a/llvm/test/CodeGen/AArch64/round-conv.ll
+++ b/llvm/test/CodeGen/AArch64/round-conv.ll
@@ -1,321 +1,451 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
 ; RUN: llc < %s -mtriple=arm64 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64 -global-isel | FileCheck %s
 
-; CHECK-LABEL: testmsws:
-; CHECK: fcvtms w0, s0
-; CHECK-NOT: frintx {{s[0-9]+}}, s0
 define i32 @testmsws(float %a) {
+; CHECK-LABEL: testmsws:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvtms w0, s0
+; CHECK-NEXT:    ret
 entry:
-  %call = call float @llvm.floor.f32(float %a) nounwind readnone
+  %call = call float @llvm.floor.f32(float %a)
   %conv = fptosi float %call to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: testmsxs:
-; CHECK: fcvtms x0, s0
-; CHECK-NOT: frintx {{s[0-9]+}}, s0
 define i64 @testmsxs(float %a) {
+; CHECK-LABEL: testmsxs:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvtms x0, s0
+; CHECK-NEXT:    ret
 entry:
-  %call = call float @llvm.floor.f32(float %a) nounwind readnone
+  %call = call float @llvm.floor.f32(float %a)
   %conv = fptosi float %call to i64
   ret i64 %conv
 }
 
-; CHECK-LABEL: testmswd:
-; CHECK: fcvtms w0, d0
-; CHECK-NOT: frintx {{d[0-9]+}}, d0
 define i32 @testmswd(double %a) {
+; CHECK-LABEL: testmswd:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvtms w0, d0
+; CHECK-NEXT:    ret
 entry:
-  %call = call double @llvm.floor.f64(double %a) nounwind readnone
+  %call = call double @llvm.floor.f64(double %a)
   %conv = fptosi double %call to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: testmsxd:
-; CHECK: fcvtms x0, d0
-; CHECK-NOT: frintx {{d[0-9]+}}, d0
 define i64 @testmsxd(double %a) {
+; CHECK-LABEL: testmsxd:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvtms x0, d0
+; CHECK-NEXT:    ret
 entry:
-  %call = call double @llvm.floor.f64(double %a) nounwind readnone
+  %call = call double @llvm.floor.f64(double %a)
   %conv = fptosi double %call to i64
   ret i64 %conv
 }
 
-; CHECK-LABEL: testmuws:
-; CHECK: fcvtmu w0, s0
-; CHECK-NOT: frintx {{s[0-9]+}}, s0
 define i32 @testmuws(float %a) {
+; CHECK-LABEL: testmuws:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvtmu w0, s0
+; CHECK-NEXT:    ret
 entry:
-  %call = call float @llvm.floor.f32(float %a) nounwind readnone
+  %call = call float @llvm.floor.f32(float %a)
   %conv = fptoui float %call to i32
   ret i32 %conv
 }
 
-; CHECK-LABEL: testmuxs:
-; CHECK: fcvtmu x0, s0
-; CHECK-NOT: frintx {{s[0-9]+}}, s0
 define i64 @testmuxs(float %a) {
+; CHECK-LABEL: testmuxs:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcvtmu x0, s0
+; CHECK-NEXT:    ret
 entry:
-  %call = call float @llvm.floor.f32(float %a) nounwind readnone
+  %call = call float @llvm.floor.f32(float %a)
   %conv = fptoui float %call to i64
   ret i64 %conv
 }
 ...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/173515

