[llvm] [AArch64] Add scal_to_vec patterns for rounding nodes (PR #173004)

via llvm-commits <llvm-commits at lists.llvm.org>
Fri Dec 19 05:20:06 PST 2025


https://github.com/Lukacma updated https://github.com/llvm/llvm-project/pull/173004

From 9d718e8829afbab0464e915a99e975d15abe0711 Mon Sep 17 00:00:00 2001
From: Marian Lukac <Marian.Lukac at arm.com>
Date: Fri, 19 Dec 2025 13:16:22 +0000
Subject: [PATCH 1/2] [AArch64] Add scal_to_vec patterns for rounding nodes
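
Lowering of lround/llround/lrint/llrint results that are immediately inserted
into a <1 x i64> vector currently goes through a GPR and is moved back into a
SIMD&FP register with an fmov. Add scalar_to_vector patterns so the
FPR-destination FCVTAS/FCVTZS forms are selected instead, keeping the value in
an FPR (using the FPRCVT variants for f16 and f32 sources).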

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |  28 +++
 .../arm64-cvt-simd-round-rint-strictfp.ll     | 194 ++++++++++++++++++
 .../AArch64/arm64-cvt-simd-round-rint.ll      | 194 ++++++++++++++++++
 .../AArch64/sve-fixed-vector-llrint.ll        |   3 +-
 .../CodeGen/AArch64/sve-fixed-vector-lrint.ll |   3 +-
 llvm/test/CodeGen/AArch64/vector-llrint.ll    |  17 +-
 llvm/test/CodeGen/AArch64/vector-lrint.ll     |  17 +-
 7 files changed, 428 insertions(+), 28 deletions(-)
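
For reference, a minimal before/after summary of the codegen change, copied
from the <1 x double> llrint case in sve-fixed-vector-llrint.ll updated below
(the same GPR round-trip disappears in the other updated tests):

  define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) nounwind {
    %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
    ret <1 x i64> %a
  }

  ; before this patch:
  ;   frintx d0, d0
  ;   fcvtzs x8, d0
  ;   fmov   d0, x8
  ;   ret
  ;
  ; with this patch:
  ;   frintx d0, d0
  ;   fcvtzs d0, d0
  ;   ret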

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index f3cd613a6bd99..b157573d4a4de 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6867,6 +6867,15 @@ def : Pat<(f32 (bitconvert (i32 (any_lround f64:$Rn)))),
           (FCVTASSDr f64:$Rn)>;
 def : Pat<(f64 (bitconvert (i64 (any_llround f32:$Rn)))),
           (FCVTASDSr f32:$Rn)>;
+
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_lround f16:$Rn)))),
+          (FCVTASDHr f16:$Rn)>;
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_llround f16:$Rn)))),
+          (FCVTASDHr f16:$Rn)>;
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_lround f32:$Rn)))),
+          (FCVTASDSr f32:$Rn)>;
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_llround f32:$Rn)))),
+          (FCVTASDSr f32:$Rn)>;
 }
 def : Pat<(f32 (bitconvert (i32 (any_lround f32:$Rn)))),
           (FCVTASv1i32 f32:$Rn)>;
@@ -6875,6 +6884,11 @@ def : Pat<(f64 (bitconvert (i64 (any_lround f64:$Rn)))),
 def : Pat<(f64 (bitconvert (i64 (any_llround f64:$Rn)))),
           (FCVTASv1i64 f64:$Rn)>;
 
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_lround f64:$Rn)))),
+          (FCVTASv1i64 f64:$Rn)>;
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_llround f64:$Rn)))),
+          (FCVTASv1i64 f64:$Rn)>;
+
 // For global-isel we can use register classes to determine
 // which FCVT instruction to use.
 let Predicates = [HasFPRCVT] in {
@@ -6911,6 +6925,15 @@ def : Pat<(f32 (bitconvert (i32 (any_lrint f64:$Rn)))),
           (FCVTZSSDr (FRINTXDr f64:$Rn))>;
 def : Pat<(f64 (bitconvert (i64 (any_llrint f32:$Rn)))),
           (FCVTZSDSr (FRINTXSr f32:$Rn))>;
+
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_lrint f16:$Rn)))),
+          (FCVTZSDHr (FRINTXHr f16:$Rn))>;
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_llrint f16:$Rn)))),
+          (FCVTZSDHr (FRINTXHr f16:$Rn))>;
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_lrint f32:$Rn)))),
+          (FCVTZSDSr (FRINTXSr f32:$Rn))>;
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_llrint f32:$Rn)))),
+          (FCVTZSDSr (FRINTXSr f32:$Rn))>;
 }
 def : Pat<(f32 (bitconvert (i32 (any_lrint f32:$Rn)))),
           (FCVTZSv1i32 (FRINTXSr f32:$Rn))>;
@@ -6919,6 +6942,11 @@ def : Pat<(f64 (bitconvert (i64 (any_lrint f64:$Rn)))),
 def : Pat<(f64 (bitconvert (i64 (any_llrint f64:$Rn)))),
           (FCVTZSv1i64 (FRINTXDr f64:$Rn))>;
 
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_lrint f64:$Rn)))),
+          (FCVTZSv1i64 (FRINTXDr f64:$Rn))>;
+def : Pat<(v1i64 (scalar_to_vector (i64 (any_llrint f64:$Rn)))),
+          (FCVTZSv1i64 (FRINTXDr f64:$Rn))>;
+
 
 // f16 -> s16 conversions
 let Predicates = [HasFullFP16] in {
diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
index 7633a9b3fff24..9a3a6f4d14539 100644
--- a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
@@ -284,3 +284,197 @@ define double @llrint_i64_f64_simd_exp(double %x)  {
   %bc  = bitcast i64 %val to double
   ret double %bc
 }
+
+;
+; Lround scalar_to_vector strictfp
+;
+
+define <1 x i64> @lround_i64_f16_scalar_to_vec_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: lround_i64_f16_scalar_to_vec_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i64_f16_scalar_to_vec_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lround.i64.f16(half %x, metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @lround_i64_f32_scalar_to_vec_exp(float %x)  {
+; CHECK-FPRCVT-LABEL: lround_i64_f32_scalar_to_vec_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i64_f32_scalar_to_vec_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lround.i64.f32(float %x, metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @lround_i64_f64_scalar_to_vec_exp(double %x)  {
+; CHECK-LABEL: lround_i64_f64_scalar_to_vec_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lround.i64.f64(double %x, metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+;
+; Llround scalar_to_vector strictfp
+;
+
+define <1 x i64> @llround_i64_f16_scalar_to_vec_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: llround_i64_f16_scalar_to_vec_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llround_i64_f16_scalar_to_vec_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f16(half %x, metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @llround_i64_f32_scalar_to_vec_exp(float %x)  {
+; CHECK-FPRCVT-LABEL: llround_i64_f32_scalar_to_vec_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llround_i64_f32_scalar_to_vec_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x, metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @llround_i64_f64_scalar_to_vec_exp(double %x)  {
+; CHECK-LABEL: llround_i64_f64_scalar_to_vec_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x, metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+;
+; Lrint scalar_to_vector strictfp
+;
+
+define <1 x i64> @lrint_i64_f16_scalar_to_vec_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i64_f16_scalar_to_vec_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i64_f16_scalar_to_vec_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lrint.i53.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @lrint_i64_f32_scalar_to_vec_exp(float %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i64_f32_scalar_to_vec_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx s0, s0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i64_f32_scalar_to_vec_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @lrint_i64_f64_scalar_to_vec_exp(double %x)  {
+; CHECK-LABEL: lrint_i64_f64_scalar_to_vec_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+;
+; Llrint scalar_to_vector strictfp
+;
+
+define <1 x i64> @llrint_i64_f16_scalar_to_vec_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: llrint_i64_f16_scalar_to_vec_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llrint_i64_f16_scalar_to_vec_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @llrint_i64_f32_scalar_to_vec_exp(float %x)  {
+; CHECK-FPRCVT-LABEL: llrint_i64_f32_scalar_to_vec_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx s0, s0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llrint_i64_f32_scalar_to_vec_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @llrint_i64_f64_scalar_to_vec_exp(double %x)  {
+; CHECK-LABEL: llrint_i64_f64_scalar_to_vec_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %vec = insertelement <1 x i64> poison, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint.ll
index 8da3bfaf09f30..28a04ff504ab2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint.ll
@@ -287,3 +287,197 @@ define double @llrint_i64_f64_simd(double %x)  {
   %bc  = bitcast i64 %val to double
   ret double %bc
 }
+
+;
+; Lround scalar_to_vector
+;
+
+define <1 x i64> @lround_i64_f16_scalar_to_vec(half %x)  {
+; CHECK-FPRCVT-LABEL: lround_i64_f16_scalar_to_vec:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i64_f16_scalar_to_vec:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.lround.i64.f16(half %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @lround_i64_f32_scalar_to_vec(float %x)  {
+; CHECK-FPRCVT-LABEL: lround_i64_f32_scalar_to_vec:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i64_f32_scalar_to_vec:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.lround.i64.f32(float %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @lround_i64_f64_scalar_to_vec(double %x)  {
+; CHECK-LABEL: lround_i64_f64_scalar_to_vec:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.lround.i64.f64(double %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+;
+; Llround scalar_to_vector
+;
+
+define <1 x i64> @llround_i64_f16_scalar_to_vec(half %x)  {
+; CHECK-FPRCVT-LABEL: llround_i64_f16_scalar_to_vec:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llround_i64_f16_scalar_to_vec:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.llround.i64.f16(half %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @llround_i64_f32_scalar_to_vec(float %x)  {
+; CHECK-FPRCVT-LABEL: llround_i64_f32_scalar_to_vec:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llround_i64_f32_scalar_to_vec:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.llround.i64.f32(float %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @llround_i64_f64_scalar_to_vec(double %x)  {
+; CHECK-LABEL: llround_i64_f64_scalar_to_vec:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.llround.i64.f64(double %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+;
+; Lrint scalar_to_vector
+;
+
+define <1 x i64> @lrint_i64_f16_scalar_to_vec(half %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i64_f16_scalar_to_vec:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i64_f16_scalar_to_vec:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.lrint.i64.f16(half %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @lrint_i64_f32_scalar_to_vec(float %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i64_f32_scalar_to_vec:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx s0, s0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i64_f32_scalar_to_vec:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.lrint.i64.f32(float %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @lrint_i64_f64_scalar_to_vec(double %x)  {
+; CHECK-LABEL: lrint_i64_f64_scalar_to_vec:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.lrint.i64.f64(double %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+;
+; Llrint scalar_to_vector
+;
+
+define <1 x i64> @llrint_i64_f16_scalar_to_vec(half %x)  {
+; CHECK-FPRCVT-LABEL: llrint_i64_f16_scalar_to_vec:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llrint_i64_f16_scalar_to_vec:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.llrint.i64.f16(half %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @llrint_i64_f32_scalar_to_vec(float %x)  {
+; CHECK-FPRCVT-LABEL: llrint_i64_f32_scalar_to_vec:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx s0, s0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llrint_i64_f32_scalar_to_vec:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.llrint.i64.f32(float %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
+
+define <1 x i64> @llrint_i64_f64_scalar_to_vec(double %x)  {
+; CHECK-LABEL: llrint_i64_f64_scalar_to_vec:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.llrint.i64.f64(double %x)
+  %vec = insertelement <1 x i64> undef, i64 %val, i32 0
+  ret <1 x i64> %vec
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
index a8b2c30bec562..8bab65d7b430e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-llrint.ll
@@ -542,8 +542,7 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) nounwind {
 ; CHECK-LABEL: llrint_v1i64_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    frintx d0, d0
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    fcvtzs d0, d0
 ; CHECK-NEXT:    ret
   %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
   ret <1 x i64> %a
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
index 465ba38b17874..d66c42b37bd7a 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-vector-lrint.ll
@@ -1006,8 +1006,7 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) nounwind {
 ; CHECK-i64-LABEL: lrint_v1f64:
 ; CHECK-i64:       // %bb.0:
 ; CHECK-i64-NEXT:    frintx d0, d0
-; CHECK-i64-NEXT:    fcvtzs x8, d0
-; CHECK-i64-NEXT:    fmov d0, x8
+; CHECK-i64-NEXT:    fcvtzs d0, d0
 ; CHECK-i64-NEXT:    ret
   %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x)
   ret <1 x iXLen> %a
diff --git a/llvm/test/CodeGen/AArch64/vector-llrint.ll b/llvm/test/CodeGen/AArch64/vector-llrint.ll
index ae7617d9c0b66..d9a9e57fe0a63 100644
--- a/llvm/test/CodeGen/AArch64/vector-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/vector-llrint.ll
@@ -806,18 +806,11 @@ define <32 x i64> @llrint_v32i64_v32f32(<32 x float> %x) nounwind {
 declare <32 x i64> @llvm.llrint.v32i64.v32f32(<32 x float>)
 
 define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) nounwind {
-; CHECK-SD-LABEL: llrint_v1i64_v1f64:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    frintx d0, d0
-; CHECK-SD-NEXT:    fcvtzs x8, d0
-; CHECK-SD-NEXT:    fmov d0, x8
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: llrint_v1i64_v1f64:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    frintx d0, d0
-; CHECK-GI-NEXT:    fcvtzs d0, d0
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: llrint_v1i64_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
   %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
   ret <1 x i64> %a
 }
diff --git a/llvm/test/CodeGen/AArch64/vector-lrint.ll b/llvm/test/CodeGen/AArch64/vector-lrint.ll
index 9eaad687fb4a2..8bbe4eaaa344d 100644
--- a/llvm/test/CodeGen/AArch64/vector-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/vector-lrint.ll
@@ -1326,18 +1326,11 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) nounwind {
 ; CHECK-i32-NEXT:    fmov s0, w8
 ; CHECK-i32-NEXT:    ret
 ;
-; CHECK-i64-SD-LABEL: lrint_v1f64:
-; CHECK-i64-SD:       // %bb.0:
-; CHECK-i64-SD-NEXT:    frintx d0, d0
-; CHECK-i64-SD-NEXT:    fcvtzs x8, d0
-; CHECK-i64-SD-NEXT:    fmov d0, x8
-; CHECK-i64-SD-NEXT:    ret
-;
-; CHECK-i64-GI-LABEL: lrint_v1f64:
-; CHECK-i64-GI:       // %bb.0:
-; CHECK-i64-GI-NEXT:    frintx d0, d0
-; CHECK-i64-GI-NEXT:    fcvtzs d0, d0
-; CHECK-i64-GI-NEXT:    ret
+; CHECK-i64-LABEL: lrint_v1f64:
+; CHECK-i64:       // %bb.0:
+; CHECK-i64-NEXT:    frintx d0, d0
+; CHECK-i64-NEXT:    fcvtzs d0, d0
+; CHECK-i64-NEXT:    ret
   %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x)
   ret <1 x iXLen> %a
 }

From c4bcd072ed6cddff0c386865cb600d0f1db0cd41 Mon Sep 17 00:00:00 2001
From: Marian Lukac <Marian.Lukac at arm.com>
Date: Fri, 19 Dec 2025 13:19:45 +0000
Subject: [PATCH 2/2] fix test
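
The new strictfp lrint f16 tests called the constrained lrint intrinsic with a
mistyped result type (lrint.i53.f16); use the intended i64 result type.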

---
 .../CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
index 9a3a6f4d14539..2f17404475bb3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
@@ -200,7 +200,7 @@ define double @lrint_i64_f16_simd_exp(half %x)  {
 ; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
 ; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
 ; CHECK-NOFPRCVT-NEXT:    ret
-  %val = call i64 @llvm.experimental.constrained.lrint.i53.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %val = call i64 @llvm.experimental.constrained.lrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
   %bc  = bitcast i64 %val to double
   ret double %bc
 }
@@ -394,7 +394,7 @@ define <1 x i64> @lrint_i64_f16_scalar_to_vec_exp(half %x)  {
 ; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
 ; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
 ; CHECK-NOFPRCVT-NEXT:    ret
-  %val = call i64 @llvm.experimental.constrained.lrint.i53.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %val = call i64 @llvm.experimental.constrained.lrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
   %vec = insertelement <1 x i64> poison, i64 %val, i32 0
   ret <1 x i64> %vec
 }


