[llvm-branch-commits] [llvm] 06c988c - [RISCV][VLOPT] Add support for vwsll.vx/vv. (#146998)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Sun Jul 6 19:11:18 PDT 2025
Author: Craig Topper
Date: 2025-07-04T00:07:47-07:00
New Revision: 06c988cefd9194891f16feeb320ec4ef2d0fb5fd
URL: https://github.com/llvm/llvm-project/commit/06c988cefd9194891f16feeb320ec4ef2d0fb5fd
DIFF: https://github.com/llvm/llvm-project/commit/06c988cefd9194891f16feeb320ec4ef2d0fb5fd.diff
LOG: [RISCV][VLOPT] Add support for vwsll.vx/vv. (#146998)
Added:
Modified:
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 1dd12ab396fd8..a406bada6b1e4 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -517,6 +517,8 @@ getOperandLog2EEW(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
case RISCV::VWSUB_VV:
case RISCV::VWSUB_VX:
case RISCV::VWSLL_VI:
+ case RISCV::VWSLL_VX:
+ case RISCV::VWSLL_VV:
// Vector Widening Integer Multiply Instructions
// Destination EEW=2*SEW. Source EEW=SEW.
case RISCV::VWMUL_VV:
@@ -1019,6 +1021,8 @@ static bool isSupportedInstr(const MachineInstr &MI) {
// Vector Crypto
case RISCV::VWSLL_VI:
+ case RISCV::VWSLL_VX:
+ case RISCV::VWSLL_VV:
// Vector Mask Instructions
// Vector Mask-Register Logical Instructions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 4c15d8e3e7c2f..c272297b41c94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -3081,24 +3081,66 @@ define <vscale x 1 x i8> @vmv_v_v(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <v
ret <vscale x 1 x i8> %3
}
-define <vscale x 4 x i32> @vwsll_vi(<vscale x 4 x i16> %a, <vscale x 4 x i32> %b, iXLen %vl) {
+define <vscale x 4 x i32> @vwsll_vi(<vscale x 4 x i16> %a, iXLen %vl) {
; NOVLOPT-LABEL: vwsll_vi:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; NOVLOPT-NEXT: vwsll.vi v12, v8, 1
+; NOVLOPT-NEXT: vwsll.vi v10, v8, 1
; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v12, v10
+; NOVLOPT-NEXT: vadd.vv v8, v10, v10
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vwsll_vi:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; VLOPT-NEXT: vwsll.vi v12, v8, 1
+; VLOPT-NEXT: vwsll.vi v10, v8, 1
; VLOPT-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; VLOPT-NEXT: vadd.vv v8, v12, v10
+; VLOPT-NEXT: vadd.vv v8, v10, v10
; VLOPT-NEXT: ret
- %1 = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a,iXLen 1, iXLen -1)
- %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+ %1 = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, iXLen 1, iXLen -1)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+ ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vwsll_vx(<vscale x 4 x i16> %a, iXLen %b, iXLen %vl) {
+; NOVLOPT-LABEL: vwsll_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; NOVLOPT-NEXT: vwsll.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT: vadd.vv v8, v10, v10
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vwsll_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; VLOPT-NEXT: vwsll.vx v10, v8, a0
+; VLOPT-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vadd.vv v8, v10, v10
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, iXLen %b, iXLen -1)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+ ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vwsll_vv(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vwsll_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; NOVLOPT-NEXT: vwsll.vv v10, v8, v9
+; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT: vadd.vv v8, v10, v10
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vwsll_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; VLOPT-NEXT: vwsll.vv v10, v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vadd.vv v8, v10, v10
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+ %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
ret <vscale x 4 x i32> %2
}
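
For context, the change lets the RISC-V VL optimizer shrink the VL of vwsll.vx and vwsll.vv (previously only vwsll.vi) when every user of the result demands fewer elements; as with the other widening instructions listed in getOperandLog2EEW, the destination EEW is 2*SEW while the sources use SEW. The sketch below is adapted from the vwsll_vx test in the diff above and only restates that pattern; the function name is illustrative and iXLen stands for the target's XLEN integer type, as in the test file.

; Minimal sketch of the pattern the optimizer now handles (assumptions noted above).
define <vscale x 4 x i32> @vwsll_vx_sketch(<vscale x 4 x i16> %a, iXLen %shamt, iXLen %vl) {
  ; The widening shift is produced with VL = -1 (VLMAX)...
  %w = call <vscale x 4 x i32> @llvm.riscv.vwsll.nxv4i32.nxv4i16(<vscale x 4 x i32> poison, <vscale x 4 x i16> %a, iXLen %shamt, iXLen -1)
  ; ...but its only user runs with VL = %vl, so the optimizer can reduce the
  ; vwsll's VL to %vl and fold away one vsetvli, as the VLOPT check lines show.
  %sum = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %w, <vscale x 4 x i32> %w, iXLen %vl)
  ret <vscale x 4 x i32> %sum
}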