[llvm] [RISCV] Add scalable vector patterns for vfwmaccbf16.v{v,f} (PR #106771)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 30 10:59:17 PDT 2024


https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/106771

From 84b18e7fc3621423b64728d4e269a3e1f604a3e7 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Sat, 31 Aug 2024 01:37:21 +0800
Subject: [PATCH 1/3] Precommit tests

---
 .../CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll   | 265 ++++++++++++++++++
 1 file changed, 265 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll
new file mode 100644
index 00000000000000..29b488bf6d3f95
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll
@@ -0,0 +1,265 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zvfbfwma -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFWMA
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zvfbfwma -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFWMA
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zvfbfmin -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFMIN
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zvfbfmin -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFMIN
+
+define <vscale x 1 x float> @vfwmaccbf16_vv_nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x bfloat> %b, <vscale x 1 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv1f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v11, v9
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v9, v10
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vv v8, v11, v9
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv1f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v11, v9
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v9, v10
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vv v8, v11, v9
+; ZVFBFMIN-NEXT:    ret
+  %b.ext = fpext <vscale x 1 x bfloat> %b to <vscale x 1 x float>
+  %c.ext = fpext <vscale x 1 x bfloat> %c to <vscale x 1 x float>
+  %res = call <vscale x 1 x float> @llvm.fma.nxv1f32(<vscale x 1 x float> %b.ext, <vscale x 1 x float> %c.ext, <vscale x 1 x float> %a)
+  ret <vscale x 1 x float> %res
+}
+
+define <vscale x 1 x float> @vfwmaccbf16_vf_nxv1f32(<vscale x 1 x float> %a, bfloat %b, <vscale x 1 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv1f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v10
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv1f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    fmv.x.w a0, fa0
+; ZVFBFMIN-NEXT:    slli a0, a0, 16
+; ZVFBFMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vf v8, fa5, v10
+; ZVFBFMIN-NEXT:    ret
+  %b.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
+  %b.splat = shufflevector <vscale x 1 x bfloat> %b.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
+  %b.ext = fpext <vscale x 1 x bfloat> %b.splat to <vscale x 1 x float>
+  %c.ext = fpext <vscale x 1 x bfloat> %c to <vscale x 1 x float>
+  %res = call <vscale x 1 x float> @llvm.fma.nxv1f32(<vscale x 1 x float> %b.ext, <vscale x 1 x float> %c.ext, <vscale x 1 x float> %a)
+  ret <vscale x 1 x float> %res
+}
+
+define <vscale x 2 x float> @vfwmaccbf16_vv_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x bfloat> %b, <vscale x 2 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv2f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v11, v9
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v9, v10
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vv v8, v11, v9
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv2f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v11, v9
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v9, v10
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vv v8, v11, v9
+; ZVFBFMIN-NEXT:    ret
+  %b.ext = fpext <vscale x 2 x bfloat> %b to <vscale x 2 x float>
+  %c.ext = fpext <vscale x 2 x bfloat> %c to <vscale x 2 x float>
+  %res = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> %b.ext, <vscale x 2 x float> %c.ext, <vscale x 2 x float> %a)
+  ret <vscale x 2 x float> %res
+}
+
+define <vscale x 2 x float> @vfwmaccbf16_vf_nxv2f32(<vscale x 2 x float> %a, bfloat %b, <vscale x 2 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv2f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v10
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv2f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    fmv.x.w a0, fa0
+; ZVFBFMIN-NEXT:    slli a0, a0, 16
+; ZVFBFMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vf v8, fa5, v10
+; ZVFBFMIN-NEXT:    ret
+  %b.head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
+  %b.splat = shufflevector <vscale x 2 x bfloat> %b.head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
+  %b.ext = fpext <vscale x 2 x bfloat> %b.splat to <vscale x 2 x float>
+  %c.ext = fpext <vscale x 2 x bfloat> %c to <vscale x 2 x float>
+  %res = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> %b.ext, <vscale x 2 x float> %c.ext, <vscale x 2 x float> %a)
+  ret <vscale x 2 x float> %res
+}
+
+define <vscale x 4 x float> @vfwmaccbf16_vv_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x bfloat> %b, <vscale x 4 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv4f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v12, v10
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v14, v11
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vv v8, v12, v14
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv4f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v12, v10
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v14, v11
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vv v8, v12, v14
+; ZVFBFMIN-NEXT:    ret
+  %b.ext = fpext <vscale x 4 x bfloat> %b to <vscale x 4 x float>
+  %c.ext = fpext <vscale x 4 x bfloat> %c to <vscale x 4 x float>
+  %res = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> %b.ext, <vscale x 4 x float> %c.ext, <vscale x 4 x float> %a)
+  ret <vscale x 4 x float> %res
+}
+
+define <vscale x 4 x float> @vfwmaccbf16_vf_nxv4f32(<vscale x 4 x float> %a, bfloat %b, <vscale x 4 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv4f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v12, v10
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v12
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv4f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    fmv.x.w a0, fa0
+; ZVFBFMIN-NEXT:    slli a0, a0, 16
+; ZVFBFMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v12, v10
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vf v8, fa5, v12
+; ZVFBFMIN-NEXT:    ret
+  %b.head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
+  %b.splat = shufflevector <vscale x 4 x bfloat> %b.head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
+  %b.ext = fpext <vscale x 4 x bfloat> %b.splat to <vscale x 4 x float>
+  %c.ext = fpext <vscale x 4 x bfloat> %c to <vscale x 4 x float>
+  %res = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> %b.ext, <vscale x 4 x float> %c.ext, <vscale x 4 x float> %a)
+  ret <vscale x 4 x float> %res
+}
+
+define <vscale x 8 x float> @vfwmaccbf16_vv_nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv8f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v20, v14
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vv v8, v16, v20
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv8f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v20, v14
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vv v8, v16, v20
+; ZVFBFMIN-NEXT:    ret
+  %b.ext = fpext <vscale x 8 x bfloat> %b to <vscale x 8 x float>
+  %c.ext = fpext <vscale x 8 x bfloat> %c to <vscale x 8 x float>
+  %res = call <vscale x 8 x float> @llvm.fma.nxv8f32(<vscale x 8 x float> %b.ext, <vscale x 8 x float> %c.ext, <vscale x 8 x float> %a)
+  ret <vscale x 8 x float> %res
+}
+
+define <vscale x 8 x float> @vfwmaccbf16_vf_nxv8f32(<vscale x 8 x float> %a, bfloat %b, <vscale x 8 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv8f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v16
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv8f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    fmv.x.w a0, fa0
+; ZVFBFMIN-NEXT:    slli a0, a0, 16
+; ZVFBFMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vf v8, fa5, v16
+; ZVFBFMIN-NEXT:    ret
+  %b.head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
+  %b.splat = shufflevector <vscale x 8 x bfloat> %b.head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
+  %b.ext = fpext <vscale x 8 x bfloat> %b.splat to <vscale x 8 x float>
+  %c.ext = fpext <vscale x 8 x bfloat> %c to <vscale x 8 x float>
+  %res = call <vscale x 8 x float> @llvm.fma.nxv8f32(<vscale x 8 x float> %b.ext, <vscale x 8 x float> %c.ext, <vscale x 8 x float> %a)
+  ret <vscale x 8 x float> %res
+}
+
+define <vscale x 16 x float> @vfwmaccbf16_vv_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x bfloat> %b, <vscale x 16 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv16f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v24, v16
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v0, v20
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vv v8, v24, v0
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv16f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v24, v16
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v0, v20
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vv v8, v24, v0
+; ZVFBFMIN-NEXT:    ret
+  %b.ext = fpext <vscale x 16 x bfloat> %b to <vscale x 16 x float>
+  %c.ext = fpext <vscale x 16 x bfloat> %c to <vscale x 16 x float>
+  %res = call <vscale x 16 x float> @llvm.fma.nxv16f32(<vscale x 16 x float> %b.ext, <vscale x 16 x float> %c.ext, <vscale x 16 x float> %a)
+  ret <vscale x 16 x float> %res
+}
+
+define <vscale x 16 x float> @vfwmaccbf16_vf_nxv16f32(<vscale x 16 x float> %a, bfloat %b, <vscale x 16 x bfloat> %c) {
+; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv16f32:
+; ZVFBFWMA:       # %bb.0:
+; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v24, v16
+; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v24
+; ZVFBFWMA-NEXT:    ret
+;
+; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv16f32:
+; ZVFBFMIN:       # %bb.0:
+; ZVFBFMIN-NEXT:    fmv.x.w a0, fa0
+; ZVFBFMIN-NEXT:    slli a0, a0, 16
+; ZVFBFMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v24, v16
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFMIN-NEXT:    vfmacc.vf v8, fa5, v24
+; ZVFBFMIN-NEXT:    ret
+  %b.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
+  %b.splat = shufflevector <vscale x 16 x bfloat> %b.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
+  %b.ext = fpext <vscale x 16 x bfloat> %b.splat to <vscale x 16 x float>
+  %c.ext = fpext <vscale x 16 x bfloat> %c to <vscale x 16 x float>
+  %res = call <vscale x 16 x float> @llvm.fma.nxv16f32(<vscale x 16 x float> %b.ext, <vscale x 16 x float> %c.ext, <vscale x 16 x float> %a)
+  ret <vscale x 16 x float> %res
+}

From 3ebf1f4bc1c5be43b6c2755356413239450628cf Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Sat, 31 Aug 2024 01:39:12 +0800
Subject: [PATCH 2/3] [RISCV] Add patterns for vfwmaccbf16.v{v,f}

We can reuse the patterns for vfwmacc.v{v,f} as long as we swap out fpext_oneuse for riscv_fpextend_bf16_oneuse in the scalar case.

For some reason nxv1f32 gets type-legalized to nxv2f32; I haven't looked into why this happens yet.
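
For illustration, here is a minimal IR sketch (function and value names are
illustrative, not taken from the patch) of the splat-of-fpext shape the new
.vf pattern matches; with +zvfbfwma this now selects to a single
vfwmaccbf16.vf, as the updated tests below show:

  define <vscale x 4 x float> @sketch_vf(<vscale x 4 x float> %acc, bfloat %s,
                                         <vscale x 4 x bfloat> %v) {
    ; Splat the scalar bf16 operand, then widen both multiplicands to f32.
    %head = insertelement <vscale x 4 x bfloat> poison, bfloat %s, i32 0
    %splat = shufflevector <vscale x 4 x bfloat> %head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
    %s.ext = fpext <vscale x 4 x bfloat> %splat to <vscale x 4 x float>
    %v.ext = fpext <vscale x 4 x bfloat> %v to <vscale x 4 x float>
    ; The one-use PatFrags require each extend to feed only the fma, so the
    ; whole expression folds into one widening fused multiply-add.
    %r = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> %s.ext, <vscale x 4 x float> %v.ext, <vscale x 4 x float> %acc)
    ret <vscale x 4 x float> %r
  }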
---
 .../Target/RISCV/RISCVInstrInfoVSDPatterns.td | 20 ++++++--
 .../lib/Target/RISCV/RISCVInstrInfoZfbfmin.td |  4 ++
 .../CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll   | 50 ++++---------------
 3 files changed, 29 insertions(+), 45 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 8d64788b3cb7db..0f435c4ff3d315 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -676,13 +676,18 @@ multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF_RM<SDNode op,
     : VPatWidenBinaryFPSDNode_VV_VF_RM<op, instruction_name>,
       VPatWidenBinaryFPSDNode_WV_WF_RM<op, instruction_name>;
 
-multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name> {
-  foreach vtiToWti = AllWidenableFloatVectors in {
+multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name,
+                                            list <VTypeInfoToWide> vtiToWtis,
+                                            PatFrags extop> {
+  foreach vtiToWti = vtiToWtis in {
     defvar vti = vtiToWti.Vti;
     defvar wti = vtiToWti.Wti;
     defvar suffix = vti.LMul.MX # "_E" # vti.SEW;
     let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
-                                 GetVTypePredicates<wti>.Predicates) in {
+                                 GetVTypePredicates<wti>.Predicates,
+                                 !if(!eq(vti.Scalar, bf16),
+                                     [HasStdExtZvfbfwma],
+                                     [])) in {
       def : Pat<(fma (wti.Vector (riscv_fpextend_vl_oneuse
                                       (vti.Vector vti.RegClass:$rs1),
                                       (vti.Mask true_mask), (XLenVT srcvalue))),
@@ -697,7 +702,7 @@ multiclass VPatWidenFPMulAccSDNode_VV_VF_RM<string instruction_name> {
                    FRM_DYN,
                    vti.AVL, vti.Log2SEW, TAIL_AGNOSTIC)>;
       def : Pat<(fma (wti.Vector (SplatFPOp
-                                      (fpext_oneuse (vti.Scalar vti.ScalarRegClass:$rs1)))),
+                                      (extop (vti.Scalar vti.ScalarRegClass:$rs1)))),
                      (wti.Vector (riscv_fpextend_vl_oneuse
                                       (vti.Vector vti.RegClass:$rs2),
                                       (vti.Mask true_mask), (XLenVT srcvalue))),
@@ -1284,7 +1289,12 @@ foreach fvti = AllFloatVectors in {
 }
 
 // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
-defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACC">;
+defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACC",
+                                        AllWidenableFloatVectors,
+                                        fpext_oneuse>;
+defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACCBF16",
+                                        AllWidenableBFloatToFloatVectors,
+                                        riscv_fpextend_bf16_oneuse>;
 defm : VPatWidenFPNegMulAccSDNode_VV_VF_RM<"PseudoVFWNMACC">;
 defm : VPatWidenFPMulSacSDNode_VV_VF_RM<"PseudoVFWMSAC">;
 defm : VPatWidenFPNegMulSacSDNode_VV_VF_RM<"PseudoVFWNMSAC">;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfbfmin.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfbfmin.td
index d819033eea68c7..88b66e7fc49aad 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfbfmin.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfbfmin.td
@@ -26,6 +26,10 @@ def riscv_fpround_bf16
     : SDNode<"RISCVISD::FP_ROUND_BF16", SDT_RISCVFP_ROUND_BF16>;
 def riscv_fpextend_bf16
     : SDNode<"RISCVISD::FP_EXTEND_BF16", SDT_RISCVFP_EXTEND_BF16>;
+def riscv_fpextend_bf16_oneuse : PatFrag<(ops node:$A),
+                                         (riscv_fpextend_bf16 node:$A), [{
+  return N->hasOneUse();
+}]>;
 
 //===----------------------------------------------------------------------===//
 // Instructions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll
index 29b488bf6d3f95..71f292e1d99170 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll
@@ -8,10 +8,7 @@ define <vscale x 1 x float> @vfwmaccbf16_vv_nxv1f32(<vscale x 1 x float> %a, <vs
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv1f32:
 ; ZVFBFWMA:       # %bb.0:
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v11, v9
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v9, v10
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vv v8, v11, v9
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vv v8, v9, v10
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv1f32:
@@ -31,11 +28,8 @@ define <vscale x 1 x float> @vfwmaccbf16_vv_nxv1f32(<vscale x 1 x float> %a, <vs
 define <vscale x 1 x float> @vfwmaccbf16_vf_nxv1f32(<vscale x 1 x float> %a, bfloat %b, <vscale x 1 x bfloat> %c) {
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv1f32:
 ; ZVFBFWMA:       # %bb.0:
-; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v10, v9
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v10
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vf v8, fa0, v9
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv1f32:
@@ -60,10 +54,7 @@ define <vscale x 2 x float> @vfwmaccbf16_vv_nxv2f32(<vscale x 2 x float> %a, <vs
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv2f32:
 ; ZVFBFWMA:       # %bb.0:
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v11, v9
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v9, v10
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vv v8, v11, v9
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vv v8, v9, v10
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv2f32:
@@ -83,11 +74,8 @@ define <vscale x 2 x float> @vfwmaccbf16_vv_nxv2f32(<vscale x 2 x float> %a, <vs
 define <vscale x 2 x float> @vfwmaccbf16_vf_nxv2f32(<vscale x 2 x float> %a, bfloat %b, <vscale x 2 x bfloat> %c) {
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv2f32:
 ; ZVFBFWMA:       # %bb.0:
-; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v10, v9
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v10
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vf v8, fa0, v9
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv2f32:
@@ -112,10 +100,7 @@ define <vscale x 4 x float> @vfwmaccbf16_vv_nxv4f32(<vscale x 4 x float> %a, <vs
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv4f32:
 ; ZVFBFWMA:       # %bb.0:
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v12, v10
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v14, v11
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vv v8, v12, v14
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vv v8, v10, v11
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv4f32:
@@ -135,11 +120,8 @@ define <vscale x 4 x float> @vfwmaccbf16_vv_nxv4f32(<vscale x 4 x float> %a, <vs
 define <vscale x 4 x float> @vfwmaccbf16_vf_nxv4f32(<vscale x 4 x float> %a, bfloat %b, <vscale x 4 x bfloat> %c) {
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv4f32:
 ; ZVFBFWMA:       # %bb.0:
-; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v12, v10
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v12
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vf v8, fa0, v10
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv4f32:
@@ -164,10 +146,7 @@ define <vscale x 8 x float> @vfwmaccbf16_vv_nxv8f32(<vscale x 8 x float> %a, <vs
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv8f32:
 ; ZVFBFWMA:       # %bb.0:
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v16, v12
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v20, v14
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vv v8, v16, v20
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vv v8, v12, v14
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv8f32:
@@ -187,11 +166,8 @@ define <vscale x 8 x float> @vfwmaccbf16_vv_nxv8f32(<vscale x 8 x float> %a, <vs
 define <vscale x 8 x float> @vfwmaccbf16_vf_nxv8f32(<vscale x 8 x float> %a, bfloat %b, <vscale x 8 x bfloat> %c) {
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv8f32:
 ; ZVFBFWMA:       # %bb.0:
-; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v16, v12
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v16
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vf v8, fa0, v12
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv8f32:
@@ -216,10 +192,7 @@ define <vscale x 16 x float> @vfwmaccbf16_vv_nxv16f32(<vscale x 16 x float> %a,
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv16f32:
 ; ZVFBFWMA:       # %bb.0:
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v24, v16
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v0, v20
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vv v8, v24, v0
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vv v8, v16, v20
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv16f32:
@@ -239,11 +212,8 @@ define <vscale x 16 x float> @vfwmaccbf16_vv_nxv16f32(<vscale x 16 x float> %a,
 define <vscale x 16 x float> @vfwmaccbf16_vf_nxv16f32(<vscale x 16 x float> %a, bfloat %b, <vscale x 16 x bfloat> %c) {
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv16f32:
 ; ZVFBFWMA:       # %bb.0:
-; ZVFBFWMA-NEXT:    fcvt.s.bf16 fa5, fa0
 ; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; ZVFBFWMA-NEXT:    vfwcvtbf16.f.f.v v24, v16
-; ZVFBFWMA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFBFWMA-NEXT:    vfmacc.vf v8, fa5, v24
+; ZVFBFWMA-NEXT:    vfwmaccbf16.vf v8, fa0, v16
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vf_nxv16f32:

From 952d0f36ae7997593d26e5ce6d8c20e8fcc1f7aa Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Sat, 31 Aug 2024 01:58:54 +0800
Subject: [PATCH 3/3] Add +v to tests

---
 .../CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll   | 20 +++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll
index 71f292e1d99170..6682aa1e17a30f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmaccbf16-sdnode.ll
@@ -1,22 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=riscv32 -mattr=+zvfbfwma -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFWMA
-; RUN: llc < %s -mtriple=riscv64 -mattr=+zvfbfwma -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFWMA
-; RUN: llc < %s -mtriple=riscv32 -mattr=+zvfbfmin -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFMIN
-; RUN: llc < %s -mtriple=riscv64 -mattr=+zvfbfmin -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFMIN
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfbfwma -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFWMA
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfbfwma -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFWMA
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFMIN
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s --check-prefix=ZVFBFMIN
 
 define <vscale x 1 x float> @vfwmaccbf16_vv_nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x bfloat> %b, <vscale x 1 x bfloat> %c) {
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vv_nxv1f32:
 ; ZVFBFWMA:       # %bb.0:
-; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFBFWMA-NEXT:    vfwmaccbf16.vv v8, v9, v10
 ; ZVFBFWMA-NEXT:    ret
 ;
 ; ZVFBFMIN-LABEL: vfwmaccbf16_vv_nxv1f32:
 ; ZVFBFMIN:       # %bb.0:
-; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v11, v9
 ; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v9, v10
-; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; ZVFBFMIN-NEXT:    vfmacc.vv v8, v11, v9
 ; ZVFBFMIN-NEXT:    ret
   %b.ext = fpext <vscale x 1 x bfloat> %b to <vscale x 1 x float>
@@ -28,7 +28,7 @@ define <vscale x 1 x float> @vfwmaccbf16_vv_nxv1f32(<vscale x 1 x float> %a, <vs
 define <vscale x 1 x float> @vfwmaccbf16_vf_nxv1f32(<vscale x 1 x float> %a, bfloat %b, <vscale x 1 x bfloat> %c) {
 ; ZVFBFWMA-LABEL: vfwmaccbf16_vf_nxv1f32:
 ; ZVFBFWMA:       # %bb.0:
-; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFWMA-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFBFWMA-NEXT:    vfwmaccbf16.vf v8, fa0, v9
 ; ZVFBFWMA-NEXT:    ret
 ;
@@ -37,9 +37,9 @@ define <vscale x 1 x float> @vfwmaccbf16_vf_nxv1f32(<vscale x 1 x float> %a, bfl
 ; ZVFBFMIN-NEXT:    fmv.x.w a0, fa0
 ; ZVFBFMIN-NEXT:    slli a0, a0, 16
 ; ZVFBFMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFBFMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9
-; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; ZVFBFMIN-NEXT:    vfmacc.vf v8, fa5, v10
 ; ZVFBFMIN-NEXT:    ret
   %b.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
