[llvm] [RISCV][VLOPT] Add support for Widening Floating-Point Fused Multiply-Add Instructions (PR #126485)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 10 00:58:10 PST 2025
https://github.com/lukel97 created https://github.com/llvm/llvm-project/pull/126485
We already had getOperandInfo support, so this patch marks the instructions as supported in isCandidate. It also adds support for vfwmaccbf16.v{v,f} from zvfbfwma.
From 52b9c5e447bcf1aca7ee337a97865bf1daa433a9 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 10 Feb 2025 16:52:42 +0800
Subject: [PATCH 1/2] Precommit tests
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 218 ++++++++++++++++++-
1 file changed, 214 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 053f1209cf21464..ffd702bedf0986f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb -riscv-enable-vl-optimizer=false -verify-machineinstrs | FileCheck %s --check-prefixes=NOVLOPT
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb -riscv-enable-vl-optimizer=false -verify-machineinstrs | FileCheck %s --check-prefixes=NOVLOPT
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb -riscv-enable-vl-optimizer -verify-machineinstrs | FileCheck %s --check-prefixes=VLOPT
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb -riscv-enable-vl-optimizer -verify-machineinstrs | FileCheck %s --check-prefixes=VLOPT
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb,+zvfbfwma -riscv-enable-vl-optimizer=false -verify-machineinstrs | FileCheck %s --check-prefixes=NOVLOPT
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb,+zvfbfwma -riscv-enable-vl-optimizer=false -verify-machineinstrs | FileCheck %s --check-prefixes=NOVLOPT
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb,+zvfbfwma -riscv-enable-vl-optimizer -verify-machineinstrs | FileCheck %s --check-prefixes=VLOPT
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb,+zvfbfwma -riscv-enable-vl-optimizer -verify-machineinstrs | FileCheck %s --check-prefixes=VLOPT
; The purpose of this file is to check the behavior of specific instructions as it relates to the VL optimizer
@@ -4351,3 +4351,213 @@ define <vscale x 4 x float> @vfnmsub_vf(<vscale x 4 x float> %a, float %b, <vsca
%2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %c, iXLen 7, iXLen %vl)
ret <vscale x 4 x float> %2
}
+
+define <vscale x 4 x double> @vfwmacc_vv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwmacc_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; NOVLOPT-NEXT: vfwmacc.vv v8, v12, v14
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v16
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwmacc_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vfwmacc.vv v8, v12, v14
+; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v16
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x double> @llvm.riscv.vfwmacc(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x double> %2
+}
+
+define <vscale x 4 x double> @vfwmacc_vf(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwmacc_vf:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; NOVLOPT-NEXT: vfwmacc.vf v8, fa0, v12
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v16
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwmacc_vf:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vfwmacc.vf v8, fa0, v12
+; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v16
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x double> @llvm.riscv.vfwmacc(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x double> %2
+}
+
+define <vscale x 4 x double> @vfwnmacc_vv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwnmacc_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; NOVLOPT-NEXT: vfwnmacc.vv v8, v12, v14
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v16
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwnmacc_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vfwnmacc.vv v8, v12, v14
+; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v16
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x double> @llvm.riscv.vfwnmacc(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x double> %2
+}
+
+define <vscale x 4 x double> @vfwnmacc_vf(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwnmacc_vf:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; NOVLOPT-NEXT: vfwnmacc.vf v8, fa0, v12
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v16
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwnmacc_vf:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vfwnmacc.vf v8, fa0, v12
+; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v16
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x double> @llvm.riscv.vfwnmacc(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x double> %2
+}
+
+define <vscale x 4 x double> @vfwmsac_vv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwmsac_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; NOVLOPT-NEXT: vfwmsac.vv v8, v12, v14
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v16
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwmsac_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vfwmsac.vv v8, v12, v14
+; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v16
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x double> @llvm.riscv.vfwmsac(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x double> %2
+}
+
+define <vscale x 4 x double> @vfwmsac_vf(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwmsac_vf:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; NOVLOPT-NEXT: vfwmsac.vf v8, fa0, v12
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v16
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwmsac_vf:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vfwmsac.vf v8, fa0, v12
+; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v16
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x double> @llvm.riscv.vfwmsac(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x double> %2
+}
+
+define <vscale x 4 x double> @vfwnmsac_vv(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwnmsac_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; NOVLOPT-NEXT: vfwnmsac.vv v8, v12, v14
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v16
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwnmsac_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vfwnmsac.vv v8, v12, v14
+; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v16
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x double> @llvm.riscv.vfwnmsac(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x double> %2
+}
+
+define <vscale x 4 x double> @vfwnmsac_vf(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, <vscale x 4 x double> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwnmsac_vf:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; NOVLOPT-NEXT: vfwnmsac.vf v8, fa0, v12
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v16
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwnmsac_vf:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vfwnmsac.vf v8, fa0, v12
+; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v16
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x double> @llvm.riscv.vfwnmsac(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x double> @llvm.riscv.vfadd(<vscale x 4 x double> poison, <vscale x 4 x double> %1, <vscale x 4 x double> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x double> %2
+}
+
+define <vscale x 4 x float> @vfwmaccbf16_vv(<vscale x 4 x float> %a, <vscale x 4 x bfloat> %b, <vscale x 4 x bfloat> %c, <vscale x 4 x float> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwmaccbf16_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT: vfwmaccbf16.vv v8, v10, v11
+; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v12
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwmaccbf16_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e16, m1, tu, ma
+; VLOPT-NEXT: vfwmaccbf16.vv v8, v10, v11
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v12
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16(<vscale x 4 x float> %a, <vscale x 4 x bfloat> %b, <vscale x 4 x bfloat> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x float> %2
+}
+
+define <vscale x 4 x float> @vfwmaccbf16_vf(<vscale x 4 x float> %a, bfloat %b, <vscale x 4 x bfloat> %c, <vscale x 4 x float> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vfwmaccbf16_vf:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT: vfwmaccbf16.vf v8, fa0, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT: vfadd.vv v8, v8, v12
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vfwmaccbf16_vf:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli a1, zero, e16, m1, tu, ma
+; VLOPT-NEXT: vfwmaccbf16.vf v8, fa0, v10
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vfadd.vv v8, v8, v12
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16(<vscale x 4 x float> %a, bfloat %b, <vscale x 4 x bfloat> %c, iXLen 7, iXLen -1, iXLen 0)
+ %2 = call <vscale x 4 x float> @llvm.riscv.vfadd(<vscale x 4 x float> poison, <vscale x 4 x float> %1, <vscale x 4 x float> %d, iXLen 7, iXLen %vl)
+ ret <vscale x 4 x float> %2
+}
From 9c677cd06d1bd8971513a608dabfbe99cbd8b043 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 10 Feb 2025 16:53:36 +0800
Subject: [PATCH 2/2] [RISCV][VLOPT] Add support for Widening Floating-Point
Fused Multiply-Add Instructions
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 13 +++++++
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 40 ++++++++++----------
2 files changed, 33 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index d4829bced247091..6c19a8fd32d42e7 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -545,6 +545,8 @@ getOperandLog2EEW(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
case RISCV::VFWMSAC_VV:
case RISCV::VFWNMSAC_VF:
case RISCV::VFWNMSAC_VV:
+ case RISCV::VFWMACCBF16_VV:
+ case RISCV::VFWMACCBF16_VF:
// Vector Widening Floating-Point Add/Subtract Instructions
// Dest EEW=2*SEW. Source EEW=SEW.
case RISCV::VFWADD_VV:
@@ -1050,6 +1052,17 @@ static bool isSupportedInstr(const MachineInstr &MI) {
case RISCV::VFMSUB_VF:
case RISCV::VFNMSUB_VV:
case RISCV::VFNMSUB_VF:
+ // Vector Widening Floating-Point Fused Multiply-Add Instructions
+ case RISCV::VFWMACC_VV:
+ case RISCV::VFWMACC_VF:
+ case RISCV::VFWNMACC_VV:
+ case RISCV::VFWNMACC_VF:
+ case RISCV::VFWMSAC_VV:
+ case RISCV::VFWMSAC_VF:
+ case RISCV::VFWNMSAC_VV:
+ case RISCV::VFWNMSAC_VF:
+ case RISCV::VFWMACCBF16_VV:
+ case RISCV::VFWMACCBF16_VF:
// Vector Floating-Point MIN/MAX Instructions
case RISCV::VFMIN_VF:
case RISCV::VFMIN_VV:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index ffd702bedf0986f..f4591a191c8b76c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -4363,9 +4363,9 @@ define <vscale x 4 x double> @vfwmacc_vv(<vscale x 4 x double> %a, <vscale x 4 x
;
; VLOPT-LABEL: vfwmacc_vv:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; VLOPT-NEXT: vfwmacc.vv v8, v12, v14
-; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v16
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x double> @llvm.riscv.vfwmacc(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
@@ -4384,9 +4384,9 @@ define <vscale x 4 x double> @vfwmacc_vf(<vscale x 4 x double> %a, float %b, <vs
;
; VLOPT-LABEL: vfwmacc_vf:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; VLOPT-NEXT: vfwmacc.vf v8, fa0, v12
-; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v16
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x double> @llvm.riscv.vfwmacc(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
@@ -4405,9 +4405,9 @@ define <vscale x 4 x double> @vfwnmacc_vv(<vscale x 4 x double> %a, <vscale x 4
;
; VLOPT-LABEL: vfwnmacc_vv:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; VLOPT-NEXT: vfwnmacc.vv v8, v12, v14
-; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v16
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x double> @llvm.riscv.vfwnmacc(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
@@ -4426,9 +4426,9 @@ define <vscale x 4 x double> @vfwnmacc_vf(<vscale x 4 x double> %a, float %b, <v
;
; VLOPT-LABEL: vfwnmacc_vf:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; VLOPT-NEXT: vfwnmacc.vf v8, fa0, v12
-; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v16
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x double> @llvm.riscv.vfwnmacc(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
@@ -4447,9 +4447,9 @@ define <vscale x 4 x double> @vfwmsac_vv(<vscale x 4 x double> %a, <vscale x 4 x
;
; VLOPT-LABEL: vfwmsac_vv:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; VLOPT-NEXT: vfwmsac.vv v8, v12, v14
-; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v16
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x double> @llvm.riscv.vfwmsac(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
@@ -4468,9 +4468,9 @@ define <vscale x 4 x double> @vfwmsac_vf(<vscale x 4 x double> %a, float %b, <vs
;
; VLOPT-LABEL: vfwmsac_vf:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; VLOPT-NEXT: vfwmsac.vf v8, fa0, v12
-; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v16
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x double> @llvm.riscv.vfwmsac(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
@@ -4489,9 +4489,9 @@ define <vscale x 4 x double> @vfwnmsac_vv(<vscale x 4 x double> %a, <vscale x 4
;
; VLOPT-LABEL: vfwnmsac_vv:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; VLOPT-NEXT: vfwnmsac.vv v8, v12, v14
-; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v16
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x double> @llvm.riscv.vfwnmsac(<vscale x 4 x double> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
@@ -4510,9 +4510,9 @@ define <vscale x 4 x double> @vfwnmsac_vf(<vscale x 4 x double> %a, float %b, <v
;
; VLOPT-LABEL: vfwnmsac_vf:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; VLOPT-NEXT: vfwnmsac.vf v8, fa0, v12
-; VLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v16
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x double> @llvm.riscv.vfwnmsac(<vscale x 4 x double> %a, float %b, <vscale x 4 x float> %c, iXLen 7, iXLen -1, iXLen 0)
@@ -4531,9 +4531,9 @@ define <vscale x 4 x float> @vfwmaccbf16_vv(<vscale x 4 x float> %a, <vscale x 4
;
; VLOPT-LABEL: vfwmaccbf16_vv:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e16, m1, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; VLOPT-NEXT: vfwmaccbf16.vv v8, v10, v11
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v12
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16(<vscale x 4 x float> %a, <vscale x 4 x bfloat> %b, <vscale x 4 x bfloat> %c, iXLen 7, iXLen -1, iXLen 0)
@@ -4552,9 +4552,9 @@ define <vscale x 4 x float> @vfwmaccbf16_vf(<vscale x 4 x float> %a, bfloat %b,
;
; VLOPT-LABEL: vfwmaccbf16_vf:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e16, m1, tu, ma
+; VLOPT-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; VLOPT-NEXT: vfwmaccbf16.vf v8, fa0, v10
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; VLOPT-NEXT: vfadd.vv v8, v8, v12
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16(<vscale x 4 x float> %a, bfloat %b, <vscale x 4 x bfloat> %c, iXLen 7, iXLen -1, iXLen 0)
More information about the llvm-commits
mailing list