[llvm] [ISel/RISCV] Custom-promote [b]f16 in [l]lrint (PR #146507)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 1 12:19:22 PDT 2025
================
@@ -226,3 +226,297 @@ define <vscale x 8 x iXLen> @lrint_nxv8f64(<vscale x 8 x double> %x) {
ret <vscale x 8 x iXLen> %a
}
declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f64(<vscale x 8 x double>)
+
+define <vscale x 1 x iXLen> @lrint_nxv1f16(<vscale x 1 x half> %x) {
+; RV32-LABEL: lrint_nxv1f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv1f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv1f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f16(<vscale x 1 x half> %x)
+ ret <vscale x 1 x iXLen> %a
+}
+declare <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x iXLen> @lrint_nxv2f16(<vscale x 2 x half> %x) {
+; RV32-LABEL: lrint_nxv2f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv2f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv2f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v10, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f16(<vscale x 2 x half> %x)
+ ret <vscale x 2 x iXLen> %a
+}
+declare <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x iXLen> @lrint_nxv4f16(<vscale x 4 x half> %x) {
+; RV32-LABEL: lrint_nxv4f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v10, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv4f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v10, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v10
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv4f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f16(<vscale x 4 x half> %x)
+ ret <vscale x 4 x iXLen> %a
+}
+declare <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x iXLen> @lrint_nxv8f16(<vscale x 8 x half> %x) {
+; RV32-LABEL: lrint_nxv8f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv8f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v12, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v12
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv8f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v16, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f16(<vscale x 8 x half> %x)
+ ret <vscale x 8 x iXLen> %a
+}
+declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x iXLen> @lrint_nxv16f16(<vscale x 16 x half> %x) {
+; RV32-LABEL: lrint_nxv16f16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vfwcvt.f.f.v v16, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v16
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv16f16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-i32-NEXT: vfwcvt.f.f.v v16, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv16f16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.f.f.v v16, v8
+; RV64-i64-NEXT: vfwcvt.f.f.v v24, v10
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: vfwcvt.x.f.v v16, v24
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f16(<vscale x 16 x half> %x)
+ ret <vscale x 16 x iXLen> %a
+}
+declare <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 1 x iXLen> @lrint_nxv1bf16(<vscale x 1 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv1bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv1bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv1bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1bf16(<vscale x 1 x bfloat> %x)
+ ret <vscale x 1 x iXLen> %a
+}
+declare <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1bf16(<vscale x 1 x bfloat>)
+
+define <vscale x 2 x iXLen> @lrint_nxv2bf16(<vscale x 2 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv2bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv2bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv2bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2bf16(<vscale x 2 x bfloat> %x)
+ ret <vscale x 2 x iXLen> %a
+}
+declare <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2bf16(<vscale x 2 x bfloat>)
+
+define <vscale x 4 x iXLen> @lrint_nxv4bf16(<vscale x 4 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv4bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv4bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v10, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v10
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv4bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v12, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4bf16(<vscale x 4 x bfloat> %x)
+ ret <vscale x 4 x iXLen> %a
+}
+declare <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4bf16(<vscale x 4 x bfloat>)
+
+define <vscale x 8 x iXLen> @lrint_nxv8bf16(<vscale x 8 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v12, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v12
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv8bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v12, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v12
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv8bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8bf16(<vscale x 8 x bfloat> %x)
+ ret <vscale x 8 x iXLen> %a
+}
+declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8bf16(<vscale x 8 x bfloat>)
+
+define <vscale x 16 x iXLen> @lrint_nxv16bf16(<vscale x 16 x bfloat> %x) {
+; RV32-LABEL: lrint_nxv16bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v8, v16
+; RV32-NEXT: ret
+;
+; RV64-i32-LABEL: lrint_nxv16bf16:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-i32-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv16bf16:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT: vfwcvtbf16.f.f.v v24, v10
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT: vfwcvt.x.f.v v16, v24
+; RV64-i64-NEXT: ret
+ %a = call <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16bf16(<vscale x 16 x bfloat> %x)
+ ret <vscale x 16 x iXLen> %a
+}
+declare <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16bf16(<vscale x 16 x bfloat>)
----------------
topperc wrote:
Please add a test where the bf16 type uses LMUL=8, which would cause the promotion to need to be split.
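
For example, something along these lines (a sketch: nxv32bf16 is the LMUL=8 shape at SEW=16, and its f32 promotion would need LMUL=16, which is why it has to be split; the CHECK lines would be regenerated with update_llc_test_checks.py):

```llvm
; nxv32bf16 is LMUL=8 at e16; promoting it to f32 would require LMUL=16,
; so the promoted operation must be split.
define <vscale x 32 x iXLen> @lrint_nxv32bf16(<vscale x 32 x bfloat> %x) {
  %a = call <vscale x 32 x iXLen> @llvm.lrint.nxv32iXLen.nxv32bf16(<vscale x 32 x bfloat> %x)
  ret <vscale x 32 x iXLen> %a
}
declare <vscale x 32 x iXLen> @llvm.lrint.nxv32iXLen.nxv32bf16(<vscale x 32 x bfloat>)
```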
https://github.com/llvm/llvm-project/pull/146507