[llvm] [LoongArch] Use LSX for scalar FP rounding with explicit rounding mode (PR #114766)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 5 05:29:43 PST 2024
https://github.com/zhaoqi5 updated https://github.com/llvm/llvm-project/pull/114766
>From a61e0bac553efaf466d4b8b8931cd0ae63d5b6bf Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Sat, 2 Nov 2024 18:38:40 +0800
Subject: [PATCH 1/3] [LoongArch] Use LSX for scalar FP rounding with explicit
rounding mode
The LoongArch FP base ISA only provides the frint.{s/d}
instructions, which read the global rounding mode. Use LSX
instead to get explicit rounding modes for scalar
ceil/floor/trunc/roundeven calls when -mlsx is enabled. This
is faster than calling the libm library functions.
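For illustration only (not part of the patch), a minimal IR sketch of
what the new patterns cover; it mirrors the ceil_f32 test added below.
With -mattr=+lsx, llc is expected to select vreplvei.w + vfrintrp.s
for this instead of tail-calling ceilf. The function name is made up
for the example.

; Scalar ceil lowered via LSX rather than a libm call.
define float @example_ceil_f32(float %x) nounwind {
entry:
  %r = call float @llvm.ceil.f32(float %x)
  ret float %r
}

declare float @llvm.ceil.f32(float)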
---
.../LoongArch/LoongArchISelLowering.cpp | 4 +
.../Target/LoongArch/LoongArchLSXInstrInfo.td | 26 ++++
.../CodeGen/LoongArch/lsx/vector-fp-conv.ll | 123 ++++++++++++++++++
3 files changed, 153 insertions(+)
create mode 100644 llvm/test/CodeGen/LoongArch/lsx/vector-fp-conv.ll
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 6bee00d1ce3823..fde1a6acc0fde3 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -286,6 +286,10 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
VT, Expand);
}
setOperationAction(ISD::CTPOP, GRLenVT, Legal);
+ setOperationAction(ISD::FCEIL, {MVT::f32, MVT::f64}, Legal);
+ setOperationAction(ISD::FFLOOR, {MVT::f32, MVT::f64}, Legal);
+ setOperationAction(ISD::FTRUNC, {MVT::f32, MVT::f64}, Legal);
+ setOperationAction(ISD::FROUNDEVEN, {MVT::f32, MVT::f64}, Legal);
}
// Set operations for 'LASX' feature.
diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index 525d2802daa235..25e70b4e6b35ae 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -2259,6 +2259,32 @@ def : Pat<(loongarch_vfrsqrte v2f64:$src),
(VFRSQRTE_D v2f64:$src)>;
}
+// Scalar floating-point rounding with explicit rounding mode, using LSX
+def : Pat<(f32 (fceil FPR32:$fj)),
+ (f32 (EXTRACT_SUBREG (VFRINTRP_S (VREPLVEI_W
+ (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32), 0)), sub_32))>;
+def : Pat<(f64 (fceil FPR64:$fj)),
+ (f64 (EXTRACT_SUBREG (VFRINTRP_D (VREPLVEI_D
+ (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64), 0)), sub_64))>;
+def : Pat<(f32 (ffloor FPR32:$fj)),
+ (f32 (EXTRACT_SUBREG (VFRINTRM_S (VREPLVEI_W
+ (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32), 0)), sub_32))>;
+def : Pat<(f64 (ffloor FPR64:$fj)),
+ (f64 (EXTRACT_SUBREG (VFRINTRM_D (VREPLVEI_D
+ (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64), 0)), sub_64))>;
+def : Pat<(f32 (ftrunc FPR32:$fj)),
+ (f32 (EXTRACT_SUBREG (VFRINTRZ_S (VREPLVEI_W
+ (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32), 0)), sub_32))>;
+def : Pat<(f64 (ftrunc FPR64:$fj)),
+ (f64 (EXTRACT_SUBREG (VFRINTRZ_D (VREPLVEI_D
+ (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64), 0)), sub_64))>;
+def : Pat<(f32 (froundeven FPR32:$fj)),
+ (f32 (EXTRACT_SUBREG (VFRINTRNE_S (VREPLVEI_W
+ (SUBREG_TO_REG (i64 0), FPR32:$fj, sub_32), 0)), sub_32))>;
+def : Pat<(f64 (froundeven FPR64:$fj)),
+ (f64 (EXTRACT_SUBREG (VFRINTRNE_D (VREPLVEI_D
+ (SUBREG_TO_REG (i64 0), FPR64:$fj, sub_64), 0)), sub_64))>;
+
// load
def : Pat<(int_loongarch_lsx_vld GPR:$rj, timm:$imm),
(VLD GPR:$rj, (to_valid_timm timm:$imm))>;
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vector-fp-conv.ll b/llvm/test/CodeGen/LoongArch/lsx/vector-fp-conv.ll
new file mode 100644
index 00000000000000..b2c618d2824e1f
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/vector-fp-conv.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+;; ceilf
+define float @ceil_f32(float %i) nounwind {
+; CHECK-LABEL: ceil_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT: vfrintrp.s $vr0, $vr0
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = call float @llvm.ceil.f32(float %i)
+ ret float %0
+}
+
+;; ceil
+define double @ceil_f64(double %i) nounwind {
+; CHECK-LABEL: ceil_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vfrintrp.d $vr0, $vr0
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = call double @llvm.ceil.f64(double %i)
+ ret double %0
+}
+
+;; floorf
+define float @floor_f32(float %i) nounwind {
+; CHECK-LABEL: floor_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT: vfrintrm.s $vr0, $vr0
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = call float @llvm.floor.f32(float %i)
+ ret float %0
+}
+
+;; floor
+define double @floor_f64(double %i) nounwind {
+; CHECK-LABEL: floor_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vfrintrm.d $vr0, $vr0
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = call double @llvm.floor.f64(double %i)
+ ret double %0
+}
+
+;; truncf
+define float @trunc_f32(float %i) nounwind {
+; CHECK-LABEL: trunc_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT: vfrintrz.s $vr0, $vr0
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = call float @llvm.trunc.f32(float %i)
+ ret float %0
+}
+
+;; trunc
+define double @trunc_f64(double %i) nounwind {
+; CHECK-LABEL: trunc_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vfrintrz.d $vr0, $vr0
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = call double @llvm.trunc.f64(double %i)
+ ret double %0
+}
+
+;; roundevenf
+define float @roundeven_f32(float %i) nounwind {
+; CHECK-LABEL: roundeven_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT: vfrintrne.s $vr0, $vr0
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = call float @llvm.roundeven.f32(float %i)
+ ret float %0
+}
+
+;; roundeven
+define double @roundeven_f64(double %i) nounwind {
+; CHECK-LABEL: roundeven_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: vfrintrne.d $vr0, $vr0
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; CHECK-NEXT: ret
+entry:
+ %0 = call double @llvm.roundeven.f64(double %i)
+ ret double %0
+}
+
+declare float @llvm.ceil.f32(float)
+declare double @llvm.ceil.f64(double)
+declare float @llvm.floor.f32(float)
+declare double @llvm.floor.f64(double)
+declare float @llvm.trunc.f32(float)
+declare double @llvm.trunc.f64(double)
+declare float @llvm.roundeven.f32(float)
+declare double @llvm.roundeven.f64(double)
>From e9cfe921b1f914cc372820e349a7d1d7deb009bd Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Tue, 5 Nov 2024 10:24:26 +0800
Subject: [PATCH 2/3] update tests
---
llvm/test/CodeGen/LoongArch/fp-conversion.ll | 156 ++++++++++++++++++
.../CodeGen/LoongArch/lsx/vector-fp-conv.ll | 123 --------------
2 files changed, 156 insertions(+), 123 deletions(-)
create mode 100644 llvm/test/CodeGen/LoongArch/fp-conversion.ll
delete mode 100644 llvm/test/CodeGen/LoongArch/lsx/vector-fp-conv.ll
diff --git a/llvm/test/CodeGen/LoongArch/fp-conversion.ll b/llvm/test/CodeGen/LoongArch/fp-conversion.ll
new file mode 100644
index 00000000000000..2f4d4eafafe0b9
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/fp-conversion.ll
@@ -0,0 +1,156 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=-lsx < %s | FileCheck %s --check-prefix=NOLSX
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefix=LSX
+
+;; ceilf
+define float @ceil_f32(float %i) nounwind {
+; NOLSX-LABEL: ceil_f32:
+; NOLSX: # %bb.0: # %entry
+; NOLSX-NEXT: b %plt(ceilf)
+;
+; LSX-LABEL: ceil_f32:
+; LSX: # %bb.0: # %entry
+; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
+; LSX-NEXT: vfrintrp.s $vr0, $vr0
+; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LSX-NEXT: ret
+entry:
+ %0 = call float @llvm.ceil.f32(float %i)
+ ret float %0
+}
+
+;; ceil
+define double @ceil_f64(double %i) nounwind {
+; NOLSX-LABEL: ceil_f64:
+; NOLSX: # %bb.0: # %entry
+; NOLSX-NEXT: b %plt(ceil)
+;
+; LSX-LABEL: ceil_f64:
+; LSX: # %bb.0: # %entry
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
+; LSX-NEXT: vfrintrp.d $vr0, $vr0
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LSX-NEXT: ret
+entry:
+ %0 = call double @llvm.ceil.f64(double %i)
+ ret double %0
+}
+
+;; floorf
+define float @floor_f32(float %i) nounwind {
+; NOLSX-LABEL: floor_f32:
+; NOLSX: # %bb.0: # %entry
+; NOLSX-NEXT: b %plt(floorf)
+;
+; LSX-LABEL: floor_f32:
+; LSX: # %bb.0: # %entry
+; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
+; LSX-NEXT: vfrintrm.s $vr0, $vr0
+; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LSX-NEXT: ret
+entry:
+ %0 = call float @llvm.floor.f32(float %i)
+ ret float %0
+}
+
+;; floor
+define double @floor_f64(double %i) nounwind {
+; NOLSX-LABEL: floor_f64:
+; NOLSX: # %bb.0: # %entry
+; NOLSX-NEXT: b %plt(floor)
+;
+; LSX-LABEL: floor_f64:
+; LSX: # %bb.0: # %entry
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
+; LSX-NEXT: vfrintrm.d $vr0, $vr0
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LSX-NEXT: ret
+entry:
+ %0 = call double @llvm.floor.f64(double %i)
+ ret double %0
+}
+
+;; truncf
+define float @trunc_f32(float %i) nounwind {
+; NOLSX-LABEL: trunc_f32:
+; NOLSX: # %bb.0: # %entry
+; NOLSX-NEXT: b %plt(truncf)
+;
+; LSX-LABEL: trunc_f32:
+; LSX: # %bb.0: # %entry
+; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
+; LSX-NEXT: vfrintrz.s $vr0, $vr0
+; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LSX-NEXT: ret
+entry:
+ %0 = call float @llvm.trunc.f32(float %i)
+ ret float %0
+}
+
+;; trunc
+define double @trunc_f64(double %i) nounwind {
+; NOLSX-LABEL: trunc_f64:
+; NOLSX: # %bb.0: # %entry
+; NOLSX-NEXT: b %plt(trunc)
+;
+; LSX-LABEL: trunc_f64:
+; LSX: # %bb.0: # %entry
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
+; LSX-NEXT: vfrintrz.d $vr0, $vr0
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LSX-NEXT: ret
+entry:
+ %0 = call double @llvm.trunc.f64(double %i)
+ ret double %0
+}
+
+;; roundevenf
+define float @roundeven_f32(float %i) nounwind {
+; NOLSX-LABEL: roundeven_f32:
+; NOLSX: # %bb.0: # %entry
+; NOLSX-NEXT: b %plt(roundevenf)
+;
+; LSX-LABEL: roundeven_f32:
+; LSX: # %bb.0: # %entry
+; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
+; LSX-NEXT: vfrintrne.s $vr0, $vr0
+; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LSX-NEXT: ret
+entry:
+ %0 = call float @llvm.roundeven.f32(float %i)
+ ret float %0
+}
+
+;; roundeven
+define double @roundeven_f64(double %i) nounwind {
+; NOLSX-LABEL: roundeven_f64:
+; NOLSX: # %bb.0: # %entry
+; NOLSX-NEXT: b %plt(roundeven)
+;
+; LSX-LABEL: roundeven_f64:
+; LSX: # %bb.0: # %entry
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
+; LSX-NEXT: vfrintrne.d $vr0, $vr0
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LSX-NEXT: ret
+entry:
+ %0 = call double @llvm.roundeven.f64(double %i)
+ ret double %0
+}
+
+declare float @llvm.ceil.f32(float)
+declare double @llvm.ceil.f64(double)
+declare float @llvm.floor.f32(float)
+declare double @llvm.floor.f64(double)
+declare float @llvm.trunc.f32(float)
+declare double @llvm.trunc.f64(double)
+declare float @llvm.roundeven.f32(float)
+declare double @llvm.roundeven.f64(double)
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vector-fp-conv.ll b/llvm/test/CodeGen/LoongArch/lsx/vector-fp-conv.ll
deleted file mode 100644
index b2c618d2824e1f..00000000000000
--- a/llvm/test/CodeGen/LoongArch/lsx/vector-fp-conv.ll
+++ /dev/null
@@ -1,123 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
-
-;; ceilf
-define float @ceil_f32(float %i) nounwind {
-; CHECK-LABEL: ceil_f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
-; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
-; CHECK-NEXT: vfrintrp.s $vr0, $vr0
-; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
-; CHECK-NEXT: ret
-entry:
- %0 = call float @llvm.ceil.f32(float %i)
- ret float %0
-}
-
-;; ceil
-define double @ceil_f64(double %i) nounwind {
-; CHECK-LABEL: ceil_f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
-; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
-; CHECK-NEXT: vfrintrp.d $vr0, $vr0
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
-; CHECK-NEXT: ret
-entry:
- %0 = call double @llvm.ceil.f64(double %i)
- ret double %0
-}
-
-;; floorf
-define float @floor_f32(float %i) nounwind {
-; CHECK-LABEL: floor_f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
-; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
-; CHECK-NEXT: vfrintrm.s $vr0, $vr0
-; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
-; CHECK-NEXT: ret
-entry:
- %0 = call float @llvm.floor.f32(float %i)
- ret float %0
-}
-
-;; floor
-define double @floor_f64(double %i) nounwind {
-; CHECK-LABEL: floor_f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
-; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
-; CHECK-NEXT: vfrintrm.d $vr0, $vr0
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
-; CHECK-NEXT: ret
-entry:
- %0 = call double @llvm.floor.f64(double %i)
- ret double %0
-}
-
-;; truncf
-define float @trunc_f32(float %i) nounwind {
-; CHECK-LABEL: trunc_f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
-; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
-; CHECK-NEXT: vfrintrz.s $vr0, $vr0
-; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
-; CHECK-NEXT: ret
-entry:
- %0 = call float @llvm.trunc.f32(float %i)
- ret float %0
-}
-
-;; trunc
-define double @trunc_f64(double %i) nounwind {
-; CHECK-LABEL: trunc_f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
-; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
-; CHECK-NEXT: vfrintrz.d $vr0, $vr0
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
-; CHECK-NEXT: ret
-entry:
- %0 = call double @llvm.trunc.f64(double %i)
- ret double %0
-}
-
-;; roundevenf
-define float @roundeven_f32(float %i) nounwind {
-; CHECK-LABEL: roundeven_f32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
-; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
-; CHECK-NEXT: vfrintrne.s $vr0, $vr0
-; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
-; CHECK-NEXT: ret
-entry:
- %0 = call float @llvm.roundeven.f32(float %i)
- ret float %0
-}
-
-;; roundeven
-define double @roundeven_f64(double %i) nounwind {
-; CHECK-LABEL: roundeven_f64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
-; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
-; CHECK-NEXT: vfrintrne.d $vr0, $vr0
-; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
-; CHECK-NEXT: ret
-entry:
- %0 = call double @llvm.roundeven.f64(double %i)
- ret double %0
-}
-
-declare float @llvm.ceil.f32(float)
-declare double @llvm.ceil.f64(double)
-declare float @llvm.floor.f32(float)
-declare double @llvm.floor.f64(double)
-declare float @llvm.trunc.f32(float)
-declare double @llvm.trunc.f64(double)
-declare float @llvm.roundeven.f32(float)
-declare double @llvm.roundeven.f64(double)
>From f60db3fbb8b5ae1fc4e9ce2562236b008ddc396d Mon Sep 17 00:00:00 2001
From: Qi Zhao <zhaoqi01 at loongson.cn>
Date: Tue, 5 Nov 2024 21:23:35 +0800
Subject: [PATCH 3/3] update tests after precommit
---
llvm/test/CodeGen/LoongArch/fp-conversion.ll | 156 -------------------
llvm/test/CodeGen/LoongArch/fp-rounding.ll | 48 +++++-
2 files changed, 40 insertions(+), 164 deletions(-)
delete mode 100644 llvm/test/CodeGen/LoongArch/fp-conversion.ll
diff --git a/llvm/test/CodeGen/LoongArch/fp-conversion.ll b/llvm/test/CodeGen/LoongArch/fp-conversion.ll
deleted file mode 100644
index 2f4d4eafafe0b9..00000000000000
--- a/llvm/test/CodeGen/LoongArch/fp-conversion.ll
+++ /dev/null
@@ -1,156 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc --mtriple=loongarch64 --mattr=-lsx < %s | FileCheck %s --check-prefix=NOLSX
-; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefix=LSX
-
-;; ceilf
-define float @ceil_f32(float %i) nounwind {
-; NOLSX-LABEL: ceil_f32:
-; NOLSX: # %bb.0: # %entry
-; NOLSX-NEXT: b %plt(ceilf)
-;
-; LSX-LABEL: ceil_f32:
-; LSX: # %bb.0: # %entry
-; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
-; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
-; LSX-NEXT: vfrintrp.s $vr0, $vr0
-; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
-; LSX-NEXT: ret
-entry:
- %0 = call float @llvm.ceil.f32(float %i)
- ret float %0
-}
-
-;; ceil
-define double @ceil_f64(double %i) nounwind {
-; NOLSX-LABEL: ceil_f64:
-; NOLSX: # %bb.0: # %entry
-; NOLSX-NEXT: b %plt(ceil)
-;
-; LSX-LABEL: ceil_f64:
-; LSX: # %bb.0: # %entry
-; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
-; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
-; LSX-NEXT: vfrintrp.d $vr0, $vr0
-; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
-; LSX-NEXT: ret
-entry:
- %0 = call double @llvm.ceil.f64(double %i)
- ret double %0
-}
-
-;; floorf
-define float @floor_f32(float %i) nounwind {
-; NOLSX-LABEL: floor_f32:
-; NOLSX: # %bb.0: # %entry
-; NOLSX-NEXT: b %plt(floorf)
-;
-; LSX-LABEL: floor_f32:
-; LSX: # %bb.0: # %entry
-; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
-; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
-; LSX-NEXT: vfrintrm.s $vr0, $vr0
-; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
-; LSX-NEXT: ret
-entry:
- %0 = call float @llvm.floor.f32(float %i)
- ret float %0
-}
-
-;; floor
-define double @floor_f64(double %i) nounwind {
-; NOLSX-LABEL: floor_f64:
-; NOLSX: # %bb.0: # %entry
-; NOLSX-NEXT: b %plt(floor)
-;
-; LSX-LABEL: floor_f64:
-; LSX: # %bb.0: # %entry
-; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
-; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
-; LSX-NEXT: vfrintrm.d $vr0, $vr0
-; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
-; LSX-NEXT: ret
-entry:
- %0 = call double @llvm.floor.f64(double %i)
- ret double %0
-}
-
-;; truncf
-define float @trunc_f32(float %i) nounwind {
-; NOLSX-LABEL: trunc_f32:
-; NOLSX: # %bb.0: # %entry
-; NOLSX-NEXT: b %plt(truncf)
-;
-; LSX-LABEL: trunc_f32:
-; LSX: # %bb.0: # %entry
-; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
-; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
-; LSX-NEXT: vfrintrz.s $vr0, $vr0
-; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
-; LSX-NEXT: ret
-entry:
- %0 = call float @llvm.trunc.f32(float %i)
- ret float %0
-}
-
-;; trunc
-define double @trunc_f64(double %i) nounwind {
-; NOLSX-LABEL: trunc_f64:
-; NOLSX: # %bb.0: # %entry
-; NOLSX-NEXT: b %plt(trunc)
-;
-; LSX-LABEL: trunc_f64:
-; LSX: # %bb.0: # %entry
-; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
-; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
-; LSX-NEXT: vfrintrz.d $vr0, $vr0
-; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
-; LSX-NEXT: ret
-entry:
- %0 = call double @llvm.trunc.f64(double %i)
- ret double %0
-}
-
-;; roundevenf
-define float @roundeven_f32(float %i) nounwind {
-; NOLSX-LABEL: roundeven_f32:
-; NOLSX: # %bb.0: # %entry
-; NOLSX-NEXT: b %plt(roundevenf)
-;
-; LSX-LABEL: roundeven_f32:
-; LSX: # %bb.0: # %entry
-; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
-; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
-; LSX-NEXT: vfrintrne.s $vr0, $vr0
-; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
-; LSX-NEXT: ret
-entry:
- %0 = call float @llvm.roundeven.f32(float %i)
- ret float %0
-}
-
-;; roundeven
-define double @roundeven_f64(double %i) nounwind {
-; NOLSX-LABEL: roundeven_f64:
-; NOLSX: # %bb.0: # %entry
-; NOLSX-NEXT: b %plt(roundeven)
-;
-; LSX-LABEL: roundeven_f64:
-; LSX: # %bb.0: # %entry
-; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
-; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
-; LSX-NEXT: vfrintrne.d $vr0, $vr0
-; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
-; LSX-NEXT: ret
-entry:
- %0 = call double @llvm.roundeven.f64(double %i)
- ret double %0
-}
-
-declare float @llvm.ceil.f32(float)
-declare double @llvm.ceil.f64(double)
-declare float @llvm.floor.f32(float)
-declare double @llvm.floor.f64(double)
-declare float @llvm.trunc.f32(float)
-declare double @llvm.trunc.f64(double)
-declare float @llvm.roundeven.f32(float)
-declare double @llvm.roundeven.f64(double)
diff --git a/llvm/test/CodeGen/LoongArch/fp-rounding.ll b/llvm/test/CodeGen/LoongArch/fp-rounding.ll
index 19c4e3fb573da9..2f4d4eafafe0b9 100644
--- a/llvm/test/CodeGen/LoongArch/fp-rounding.ll
+++ b/llvm/test/CodeGen/LoongArch/fp-rounding.ll
@@ -10,7 +10,11 @@ define float @ceil_f32(float %i) nounwind {
;
; LSX-LABEL: ceil_f32:
; LSX: # %bb.0: # %entry
-; LSX-NEXT: b %plt(ceilf)
+; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
+; LSX-NEXT: vfrintrp.s $vr0, $vr0
+; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LSX-NEXT: ret
entry:
%0 = call float @llvm.ceil.f32(float %i)
ret float %0
@@ -24,7 +28,11 @@ define double @ceil_f64(double %i) nounwind {
;
; LSX-LABEL: ceil_f64:
; LSX: # %bb.0: # %entry
-; LSX-NEXT: b %plt(ceil)
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
+; LSX-NEXT: vfrintrp.d $vr0, $vr0
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LSX-NEXT: ret
entry:
%0 = call double @llvm.ceil.f64(double %i)
ret double %0
@@ -38,7 +46,11 @@ define float @floor_f32(float %i) nounwind {
;
; LSX-LABEL: floor_f32:
; LSX: # %bb.0: # %entry
-; LSX-NEXT: b %plt(floorf)
+; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
+; LSX-NEXT: vfrintrm.s $vr0, $vr0
+; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LSX-NEXT: ret
entry:
%0 = call float @llvm.floor.f32(float %i)
ret float %0
@@ -52,7 +64,11 @@ define double @floor_f64(double %i) nounwind {
;
; LSX-LABEL: floor_f64:
; LSX: # %bb.0: # %entry
-; LSX-NEXT: b %plt(floor)
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
+; LSX-NEXT: vfrintrm.d $vr0, $vr0
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LSX-NEXT: ret
entry:
%0 = call double @llvm.floor.f64(double %i)
ret double %0
@@ -66,7 +82,11 @@ define float @trunc_f32(float %i) nounwind {
;
; LSX-LABEL: trunc_f32:
; LSX: # %bb.0: # %entry
-; LSX-NEXT: b %plt(truncf)
+; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
+; LSX-NEXT: vfrintrz.s $vr0, $vr0
+; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LSX-NEXT: ret
entry:
%0 = call float @llvm.trunc.f32(float %i)
ret float %0
@@ -80,7 +100,11 @@ define double @trunc_f64(double %i) nounwind {
;
; LSX-LABEL: trunc_f64:
; LSX: # %bb.0: # %entry
-; LSX-NEXT: b %plt(trunc)
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
+; LSX-NEXT: vfrintrz.d $vr0, $vr0
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LSX-NEXT: ret
entry:
%0 = call double @llvm.trunc.f64(double %i)
ret double %0
@@ -94,7 +118,11 @@ define float @roundeven_f32(float %i) nounwind {
;
; LSX-LABEL: roundeven_f32:
; LSX: # %bb.0: # %entry
-; LSX-NEXT: b %plt(roundevenf)
+; LSX-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LSX-NEXT: vreplvei.w $vr0, $vr0, 0
+; LSX-NEXT: vfrintrne.s $vr0, $vr0
+; LSX-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LSX-NEXT: ret
entry:
%0 = call float @llvm.roundeven.f32(float %i)
ret float %0
@@ -108,7 +136,11 @@ define double @roundeven_f64(double %i) nounwind {
;
; LSX-LABEL: roundeven_f64:
; LSX: # %bb.0: # %entry
-; LSX-NEXT: b %plt(roundeven)
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0
+; LSX-NEXT: vreplvei.d $vr0, $vr0, 0
+; LSX-NEXT: vfrintrne.d $vr0, $vr0
+; LSX-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LSX-NEXT: ret
entry:
%0 = call double @llvm.roundeven.f64(double %i)
ret double %0
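To regenerate the autogenerated assertions after further local changes,
a minimal sketch following the NOTE/RUN lines in the test (the path to
the built llc binary is an assumption):

  # Rewrites the CHECK lines of the rounding test in place.
  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
      llvm/test/CodeGen/LoongArch/fp-rounding.ll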