[llvm] 2766002 - [AArch64] SIMD fpcvt codegen for rounding nodes (#165546)

via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 9 05:32:23 PST 2025


Author: Lukacma
Date: 2025-12-09T13:32:19Z
New Revision: 27660022743c560d266bb9c0c83454f663a2d7ad

URL: https://github.com/llvm/llvm-project/commit/27660022743c560d266bb9c0c83454f663a2d7ad
DIFF: https://github.com/llvm/llvm-project/commit/27660022743c560d266bb9c0c83454f663a2d7ad.diff

LOG: [AArch64] SIMD fpcvt codegen for rounding nodes (#165546)

This is a follow-up to https://github.com/llvm/llvm-project/pull/157680,
allowing SIMD fpcvt instructions to also be generated from the l/llround
and l/llrint nodes.
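
As a quick sketch of the shape being matched (mirroring the new tests
below; the function name here is only illustrative), the patterns fire
when the integer result of an lround/lrint-style node is immediately
bitcast back into an FP register:

  define float @lround_i32_f16_to_fpr(half %x) {
    ; With llc -mtriple aarch64-unknown-unknown -mattr=+fprcvt,+fullfp16
    ; this is expected to select a single SIMD-form conversion:
    ;   fcvtas s0, h0
    ; rather than fcvtas w8, h0 followed by fmov s0, w8.
    %val = call i32 @llvm.lround.i32.f16(half %x)
    %bc  = bitcast i32 %val to float
    ret float %bc
  }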

Added: 
    llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
    llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64InstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 0cbe867497625..7ee094ad4ac87 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6841,6 +6841,49 @@ defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, ftrunc, "F
 defm : FPToIntegerPats<fp_to_sint, fp_to_sint_sat, fp_to_sint_sat_gi, fround, "FCVTAS">;
 defm : FPToIntegerPats<fp_to_uint, fp_to_uint_sat, fp_to_uint_sat_gi, fround, "FCVTAU">;
 
+let Predicates = [HasFPRCVT] in {
+  def : Pat<(f32 (bitconvert (i32 (any_lround f16:$Rn)))),
+            (FCVTASSHr f16:$Rn)>;
+  def : Pat<(f64 (bitconvert (i64 (any_lround f16:$Rn)))),
+            (FCVTASDHr f16:$Rn)>;
+  def : Pat<(f64 (bitconvert (i64 (any_llround f16:$Rn)))),
+            (FCVTASDHr f16:$Rn)>;
+  def : Pat<(f64 (bitconvert (i64 (any_lround f32:$Rn)))),
+            (FCVTASDSr f32:$Rn)>;
+  def : Pat<(f32 (bitconvert (i32 (any_lround f64:$Rn)))),
+            (FCVTASSDr f64:$Rn)>;
+  def : Pat<(f64 (bitconvert (i64 (any_llround f32:$Rn)))),
+            (FCVTASDSr f32:$Rn)>;
+}
+def : Pat<(f32 (bitconvert (i32 (any_lround f32:$Rn)))),
+          (FCVTASv1i32 f32:$Rn)>;
+def : Pat<(f64 (bitconvert (i64 (any_lround f64:$Rn)))),
+          (FCVTASv1i64 f64:$Rn)>;
+def : Pat<(f64 (bitconvert (i64 (any_llround f64:$Rn)))),
+          (FCVTASv1i64 f64:$Rn)>;
+
+let Predicates = [HasFPRCVT] in {
+  def : Pat<(f32 (bitconvert (i32 (any_lrint f16:$Rn)))),
+            (FCVTZSSHr (FRINTXHr f16:$Rn))>;
+  def : Pat<(f64 (bitconvert (i64 (any_lrint f16:$Rn)))),
+            (FCVTZSDHr (FRINTXHr f16:$Rn))>;
+  def : Pat<(f64 (bitconvert (i64 (any_llrint f16:$Rn)))),
+            (FCVTZSDHr (FRINTXHr f16:$Rn))>;
+  def : Pat<(f64 (bitconvert (i64 (any_lrint f32:$Rn)))),
+            (FCVTZSDSr (FRINTXSr f32:$Rn))>;
+  def : Pat<(f32 (bitconvert (i32 (any_lrint f64:$Rn)))),
+            (FCVTZSSDr (FRINTXDr f64:$Rn))>;
+  def : Pat<(f64 (bitconvert (i64 (any_llrint f32:$Rn)))),
+            (FCVTZSDSr (FRINTXSr f32:$Rn))>;
+}
+def : Pat<(f32 (bitconvert (i32 (any_lrint f32:$Rn)))),
+          (FCVTZSv1i32 (FRINTXSr f32:$Rn))>;
+def : Pat<(f64 (bitconvert (i64 (any_lrint f64:$Rn)))),
+          (FCVTZSv1i64 (FRINTXDr f64:$Rn))>;
+def : Pat<(f64 (bitconvert (i64 (any_llrint f64:$Rn)))),
+          (FCVTZSv1i64 (FRINTXDr f64:$Rn))>;
+
+
 // f16 -> s16 conversions
 let Predicates = [HasFullFP16] in {
   def : Pat<(i16(fp_to_sint_sat_gi f16:$Rn)), (FCVTZSv1f16 f16:$Rn)>;

diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
new file mode 100644
index 0000000000000..7633a9b3fff24
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint-strictfp.ll
@@ -0,0 +1,286 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fprcvt,+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FPRCVT
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-NOFPRCVT
+
+;
+; Lround strictfp
+;
+
+define float @lround_i32_f16_simd_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: lround_i32_f16_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas s0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i32_f16_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas w8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i32 @llvm.experimental.constrained.lround.i32.f16(half %x, metadata !"fpexcept.strict")
+  %sum = bitcast i32 %val to float
+  ret float %sum
+}
+
+define float @lround_i32_f32_simd_exp(float %x)  {
+; CHECK-LABEL: lround_i32_f32_simd_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas s0, s0
+; CHECK-NEXT:    ret
+  %val = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x, metadata !"fpexcept.strict")
+  %bc  = bitcast i32 %val to float
+  ret float %bc
+}
+
+define float @lround_i32_f64_simd_exp(double %x)  {
+; CHECK-FPRCVT-LABEL: lround_i32_f64_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas s0, d0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i32_f64_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x, metadata !"fpexcept.strict")
+  %bc  = bitcast i32 %val to float
+  ret float %bc
+}
+
+define double @lround_i64_f16_simd_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: lround_i64_f16_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i64_f16_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lround.i64.f16(half %x, metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @lround_i64_f32_simd_exp(float %x)  {
+; CHECK-FPRCVT-LABEL: lround_i64_f32_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i64_f32_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lround.i64.f32(float %x, metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @lround_i64_f64_simd_exp(double %x)  {
+; CHECK-LABEL: lround_i64_f64_simd_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lround.i64.f64(double %x, metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+;
+; Llround strictfp
+;
+
+define double @llround_i64_f16_simd_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: llround_i64_f16_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llround_i64_f16_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f16(half %x, metadata !"fpexcept.strict")
+  %sum = bitcast i64 %val to double
+  ret double %sum
+}
+
+define double @llround_i64_f32_simd_exp(float %x)  {
+; CHECK-FPRCVT-LABEL: llround_i64_f32_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llround_i64_f32_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x, metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @llround_i64_f64_simd_exp(double %x)  {
+; CHECK-LABEL: llround_i64_f64_simd_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x, metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+;
+; Lrint strictfp
+;
+
+define float @lrint_i32_f16_simd_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i32_f16_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs s0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i32_f16_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i32 @llvm.experimental.constrained.lrint.i32.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %sum = bitcast i32 %val to float
+  ret float %sum
+}
+
+define float @lrint_i32_f32_simd_exp(float %x)  {
+; CHECK-LABEL: lrint_i32_f32_simd_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    ret
+  %val = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %bc  = bitcast i32 %val to float
+  ret float %bc
+}
+
+define float @lrint_i32_f64_simd_exp(double %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i32_f64_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx d0, d0
+; CHECK-FPRCVT-NEXT:    fcvtzs s0, d0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i32_f64_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %bc  = bitcast i32 %val to float
+  ret float %bc
+}
+
+define double @lrint_i64_f16_simd_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i64_f16_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i64_f16_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @lrint_i64_f32_simd_exp(float %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i64_f32_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx s0, s0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i64_f32_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @lrint_i64_f64_simd_exp(double %x)  {
+; CHECK-LABEL: lrint_i64_f64_simd_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.lrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+;
+; Llrint strictfp
+;
+
+define double @llrint_i64_f16_simd_exp(half %x)  {
+; CHECK-FPRCVT-LABEL: llrint_i64_f16_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llrint_i64_f16_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %sum = bitcast i64 %val to double
+  ret double %sum
+}
+
+define double @llrint_i64_f32_simd_exp(float %x)  {
+; CHECK-FPRCVT-LABEL: llrint_i64_f32_simd_exp:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx s0, s0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llrint_i64_f32_simd_exp:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @llrint_i64_f64_simd_exp(double %x)  {
+; CHECK-LABEL: llrint_i64_f64_simd_exp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}

diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint.ll
new file mode 100644
index 0000000000000..8717952ea944a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-round-rint.ll
@@ -0,0 +1,286 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fprcvt,+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FPRCVT
+; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-NOFPRCVT
+
+;
+; Lround
+;
+
+define float @lround_i32_f16_simd(half %x)  {
+; CHECK-FPRCVT-LABEL: lround_i32_f16_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas s0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i32_f16_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas w8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i32 @llvm.lround.i32.f16(half %x)
+  %sum = bitcast i32 %val to float
+  ret float %sum
+}
+
+define float @lround_i32_f32_simd(float %x)  {
+; CHECK-LABEL: lround_i32_f32_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas s0, s0
+; CHECK-NEXT:    ret
+  %val = call i32 @llvm.lround.i32.f32(float %x)
+  %bc  = bitcast i32 %val to float
+  ret float %bc
+}
+
+define float @lround_i32_f64_simd(double %x)  {
+; CHECK-FPRCVT-LABEL: lround_i32_f64_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas s0, d0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i32_f64_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i32 @llvm.lround.i32.f64(double %x)
+  %bc  = bitcast i32 %val to float
+  ret float %bc
+}
+
+define double @lround_i64_f16_simd(half %x)  {
+; CHECK-FPRCVT-LABEL: lround_i64_f16_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i64_f16_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.lround.i64.f16(half %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @lround_i64_f32_simd(float %x)  {
+; CHECK-FPRCVT-LABEL: lround_i64_f32_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lround_i64_f32_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.lround.i64.f32(float %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @lround_i64_f64_simd(double %x)  {
+; CHECK-LABEL: lround_i64_f64_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.lround.i64.f64(double %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+;
+; Llround
+;
+
+define double @llround_i64_f16_simd(half %x)  {
+; CHECK-FPRCVT-LABEL: llround_i64_f16_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llround_i64_f16_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.llround.i64.f16(half %x)
+  %sum = bitcast i64 %val to double
+  ret double %sum
+}
+
+define double @llround_i64_f32_simd(float %x)  {
+; CHECK-FPRCVT-LABEL: llround_i64_f32_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    fcvtas d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llround_i64_f32_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    fcvtas x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.llround.i64.f32(float %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @llround_i64_f64_simd(double %x)  {
+; CHECK-LABEL: llround_i64_f64_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvtas d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.llround.i64.f64(double %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+;
+; Lrint
+;
+
+define float @lrint_i32_f16_simd(half %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i32_f16_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs s0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i32_f16_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i32 @llvm.lrint.i32.f16(half %x)
+  %sum = bitcast i32 %val to float
+  ret float %sum
+}
+
+define float @lrint_i32_f32_simd(float %x)  {
+; CHECK-LABEL: lrint_i32_f32_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx s0, s0
+; CHECK-NEXT:    fcvtzs s0, s0
+; CHECK-NEXT:    ret
+  %val = call i32 @llvm.lrint.i32.f32(float %x)
+  %bc  = bitcast i32 %val to float
+  ret float %bc
+}
+
+define float @lrint_i32_f64_simd(double %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i32_f64_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx d0, d0
+; CHECK-FPRCVT-NEXT:    fcvtzs s0, d0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i32_f64_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx d0, d0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs w8, d0
+; CHECK-NOFPRCVT-NEXT:    fmov s0, w8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i32 @llvm.lrint.i32.f64(double %x)
+  %bc  = bitcast i32 %val to float
+  ret float %bc
+}
+
+define double @lrint_i64_f16_simd(half %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i64_f16_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i64_f16_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.lrint.i64.f16(half %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @lrint_i64_f32_simd(float %x)  {
+; CHECK-FPRCVT-LABEL: lrint_i64_f32_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx s0, s0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: lrint_i64_f32_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.lrint.i64.f32(float %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @lrint_i64_f64_simd(double %x)  {
+; CHECK-LABEL: lrint_i64_f64_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.lrint.i64.f64(double %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+;
+; Llrint
+;
+
+define double @llrint_i64_f16_simd(half %x)  {
+; CHECK-FPRCVT-LABEL: llrint_i64_f16_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx h0, h0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, h0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llrint_i64_f16_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx h0, h0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, h0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.llrint.i64.f16(half %x)
+  %sum = bitcast i64 %val to double
+  ret double %sum
+}
+
+define double @llrint_i64_f32_simd(float %x)  {
+; CHECK-FPRCVT-LABEL: llrint_i64_f32_simd:
+; CHECK-FPRCVT:       // %bb.0:
+; CHECK-FPRCVT-NEXT:    frintx s0, s0
+; CHECK-FPRCVT-NEXT:    fcvtzs d0, s0
+; CHECK-FPRCVT-NEXT:    ret
+;
+; CHECK-NOFPRCVT-LABEL: llrint_i64_f32_simd:
+; CHECK-NOFPRCVT:       // %bb.0:
+; CHECK-NOFPRCVT-NEXT:    frintx s0, s0
+; CHECK-NOFPRCVT-NEXT:    fcvtzs x8, s0
+; CHECK-NOFPRCVT-NEXT:    fmov d0, x8
+; CHECK-NOFPRCVT-NEXT:    ret
+  %val = call i64 @llvm.llrint.i64.f32(float %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}
+
+define double @llrint_i64_f64_simd(double %x)  {
+; CHECK-LABEL: llrint_i64_f64_simd:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    frintx d0, d0
+; CHECK-NEXT:    fcvtzs d0, d0
+; CHECK-NEXT:    ret
+  %val = call i64 @llvm.llrint.i64.f64(double %x)
+  %bc  = bitcast i64 %val to double
+  ret double %bc
+}


        

