[llvm] 658ff0b - [RISCV][VLOPT] Add support for integer widening multiply instructions (#112204)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 16 06:37:33 PDT 2024
Author: Michael Maitland
Date: 2024-10-16T09:37:27-04:00
New Revision: 658ff0b84c9dd5f33f4d769ba7378cc2c64315a1
URL: https://github.com/llvm/llvm-project/commit/658ff0b84c9dd5f33f4d769ba7378cc2c64315a1
DIFF: https://github.com/llvm/llvm-project/commit/658ff0b84c9dd5f33f4d769ba7378cc2c64315a1.diff
LOG: [RISCV][VLOPT] Add support for integer widening multiply instructions (#112204)
This adds support for these instructions and also adds getOperandInfo tests for them.
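As context for the tests below, here is a minimal sketch of the transformation the pass can now perform for these opcodes. It mirrors the vwmulu_vv test case added in this patch, with the test file's iXLen placeholder written out as i64 so the snippet stands alone (the function name @sketch is just illustrative). The widening multiply is computed at VLMAX (an AVL of -1), but its only user reads just %vl elements, so the optimizer may shrink the producer's VL to %vl. For widening instructions this also depends on getOperandInfo reporting the destination operand at twice the EEW/EMUL of the sources, which is what the new tests exercise.

  define <vscale x 4 x i64> @sketch(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 %vl) {
    ; The producer runs at VLMAX even though only %vl of its elements are used ...
    %prod = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 -1)
    ; ... so its VL can be reduced to %vl, the VL demanded by its single user.
    %use = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %prod, <vscale x 4 x i64> %prod, i64 %vl)
    ret <vscale x 4 x i64> %use
  }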
Added:
Modified:
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 53373b7a0f1575..6053899987db9b 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -563,7 +563,12 @@ static bool isSupportedInstr(const MachineInstr &MI) {
case RISCV::VREM_VV:
case RISCV::VREM_VX:
// Vector Widening Integer Multiply Instructions
- // FIXME: Add support
+ case RISCV::VWMUL_VV:
+ case RISCV::VWMUL_VX:
+ case RISCV::VWMULSU_VV:
+ case RISCV::VWMULSU_VX:
+ case RISCV::VWMULU_VV:
+ case RISCV::VWMULU_VX:
// Vector Single-Width Integer Multiply-Add Instructions
// FIXME: Add support
// Vector Widening Integer Multiply-Add Instructions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index a360ae1998f77a..11f603b56b6e56 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -1122,6 +1122,132 @@ define <vscale x 4 x i32> @vrem_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
ret <vscale x 4 x i32> %2
}
+define <vscale x 4 x i64> @vwmul_vv(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vwmul_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; NOVLOPT-NEXT: vwmul.vv v12, v8, v9
+; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT: vwmul.vv v8, v12, v12
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vwmul_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; VLOPT-NEXT: vwmul.vv v12, v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vwmul.vv v8, v12, v12
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+ %2 = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+ ret <vscale x 4 x i64> %2
+}
+
+define <vscale x 4 x i64> @vwmul_vx(<vscale x 4 x i16> %a, i16 %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vwmul_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a3, zero, e16, m1, ta, ma
+; NOVLOPT-NEXT: vwmul.vx v12, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a2, e32, m2, ta, ma
+; NOVLOPT-NEXT: vwmul.vx v8, v12, a1
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vwmul_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a2, e16, m1, ta, ma
+; VLOPT-NEXT: vwmul.vx v12, v8, a0
+; VLOPT-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vwmul.vx v8, v12, a1
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, i16 %b, iXLen -1)
+ %2 = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %1, i32 %c, iXLen %vl)
+ ret <vscale x 4 x i64> %2
+}
+
+define <vscale x 4 x i64> @vwmulsu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vwmulsu_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vwmulsu.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vadd.vv v8, v12, v12
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vwmulsu_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vwmulsu.vv v12, v8, v10
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; VLOPT-NEXT: vadd.vv v8, v12, v12
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
+ %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl)
+ ret <vscale x 4 x i64> %2
+}
+
+define <vscale x 4 x i64> @vwmulsu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
+; NOVLOPT-LABEL: vwmulsu_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vwmulsu.vx v12, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; NOVLOPT-NEXT: vadd.vv v8, v12, v12
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vwmulsu_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vwmulsu.vx v12, v8, a0
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; VLOPT-NEXT: vadd.vv v8, v12, v12
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
+ %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl)
+ ret <vscale x 4 x i64> %2
+}
+
+define <vscale x 4 x i64> @vwmulu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vwmulu_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vwmulu.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; NOVLOPT-NEXT: vadd.vv v8, v12, v12
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vwmulu_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vwmulu.vv v12, v8, v10
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; VLOPT-NEXT: vadd.vv v8, v12, v12
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
+ %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl)
+ ret <vscale x 4 x i64> %2
+}
+
+define <vscale x 4 x i64> @vwmulu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
+; NOVLOPT-LABEL: vwmulu_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vwmulu.vx v12, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; NOVLOPT-NEXT: vadd.vv v8, v12, v12
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vwmulu_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vwmulu.vx v12, v8, a0
+; VLOPT-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; VLOPT-NEXT: vadd.vv v8, v12, v12
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
+ %2 = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %1, <vscale x 4 x i64> %1, iXLen %vl)
+ ret <vscale x 4 x i64> %2
+}
+
define <vscale x 4 x i32> @vwmacc_vx(<vscale x 4 x i16> %a, i16 %b, iXLen %vl) {
; NOVLOPT-LABEL: vwmacc_vx:
; NOVLOPT: # %bb.0: