[llvm] [RISCV][VLOPT] Add support for vector integer add-with-carry/subtract-with-borrow instructions (PR #148247)
Mikhail R. Gadelha via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 11 07:24:58 PDT 2025
https://github.com/mikhailramalho created https://github.com/llvm/llvm-project/pull/148247
This PR adds support for the vmadc.vim, vmadc.vvm, vmadc.vxm, vmsbc.vvm, vmsbc.vxm, vsbc.vvm, and vsbc.vxm instructions in the RISC-V VLOptimizer.
This is the second PR addressing the list at #147647.
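For context, a minimal sketch of the kind of pattern this enables the optimizer to shrink, mirroring the vmadc_vim test in llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll (the function name @vmadc_sketch is illustrative; iXLen is the placeholder used by that test file): the add-with-carry result is produced at VLMAX (AVL -1) but only consumed at the smaller VL of the mask operation, so once vmadc.vim is listed as supported the producer's VL can be reduced to the consumer's VL and the extra vsetvli toggle disappears from the output.

define <vscale x 4 x i1> @vmadc_sketch(<vscale x 4 x i32> %a, <vscale x 4 x i1> %mask, <vscale x 4 x i1> %b, iXLen %vl) {
  ; Produced at VLMAX; with VMADC_VIM now in isSupportedInstr, the VL
  ; optimizer may reduce this instruction's VL to %vl.
  %carry = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, <vscale x 4 x i1> %mask, iXLen -1)
  ; The only user reads just %vl elements.
  %res = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %carry, <vscale x 4 x i1> %b, iXLen %vl)
  ret <vscale x 4 x i1> %res
}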
From 0a2a2ee03fdb785945b1399ad5eecfe8ea663c24 Mon Sep 17 00:00:00 2001
From: "Mikhail R. Gadelha" <mikhail at igalia.com>
Date: Thu, 10 Jul 2025 17:09:01 -0300
Subject: [PATCH 1/2] [RISCV][VLOPT] Add support for vector integer
add-with-carry/subtract-with-borrow instructions
Signed-off-by: Mikhail R. Gadelha <mikhail at igalia.com>
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 2d9f38221d424..d4a417d43050f 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -966,6 +966,13 @@ static bool isSupportedInstr(const MachineInstr &MI) {
case RISCV::VADC_VIM:
case RISCV::VADC_VVM:
case RISCV::VADC_VXM:
+ case RISCV::VMADC_VIM:
+ case RISCV::VMADC_VVM:
+ case RISCV::VMADC_VXM:
+ case RISCV::VMSBC_VVM:
+ case RISCV::VMSBC_VXM:
+ case RISCV::VSBC_VVM:
+ case RISCV::VSBC_VXM:
// Vector Widening Integer Multiply-Add Instructions
case RISCV::VWMACCU_VV:
case RISCV::VWMACCU_VX:
From f7cedeebc589f32911fcce2869ea2c5b71623ff3 Mon Sep 17 00:00:00 2001
From: "Mikhail R. Gadelha" <mikhail at igalia.com>
Date: Thu, 10 Jul 2025 17:11:16 -0300
Subject: [PATCH 2/2] Updated test
Signed-off-by: Mikhail R. Gadelha <mikhail at igalia.com>
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 21 +++++++-------------
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 317ad0c124e73..7d7e217ba8773 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -1033,9 +1033,8 @@ define <vscale x 4 x i1> @vmadc_vim(<vscale x 4 x i32> %a, <vscale x 4 x i1> %ma
;
; VLOPT-LABEL: vmadc_vim:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vmadc.vim v11, v8, 5, v0
-; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; VLOPT-NEXT: vmand.mm v0, v11, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, <vscale x 4 x i1> %mask, iXLen -1)
@@ -1054,9 +1053,8 @@ define <vscale x 4 x i1> @vmadc_vxm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %ma
;
; VLOPT-LABEL: vmadc_vxm:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vmadc.vxm v11, v8, a0, v0
-; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; VLOPT-NEXT: vmand.mm v0, v11, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, <vscale x 4 x i1> %mask, iXLen -1)
@@ -1075,9 +1073,8 @@ define <vscale x 4 x i1> @vmadc_vvm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %ma
;
; VLOPT-LABEL: vmadc_vvm:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vmadc.vvm v11, v8, v12, v0
-; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; VLOPT-NEXT: vmand.mm v0, v11, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, <vscale x 4 x i1> %mask, iXLen -1)
@@ -1096,9 +1093,8 @@ define <vscale x 4 x i1> @vmsbc_vvm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %ma
;
; VLOPT-LABEL: vmsbc_vvm:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT: vmsbc.vvm v11, v8, v12, v0
-; VLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; VLOPT-NEXT: vmand.mm v0, v11, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, <vscale x 4 x i1> %mask, iXLen -1)
@@ -1117,9 +1113,8 @@ define <vscale x 4 x i1> @vmsbc_vxm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %ma
;
; VLOPT-LABEL: vmsbc_vxm:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT: vmsbc.vxm v11, v8, a0, v0
-; VLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; VLOPT-NEXT: vmand.mm v0, v11, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, <vscale x 4 x i1> %mask, iXLen -1)
@@ -5413,9 +5408,8 @@ define <vscale x 4 x i32> @vsbc_vvm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %ma
;
; VLOPT-LABEL: vsbc_vvm:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; VLOPT-NEXT: vsbc.vvm v8, v8, v10, v0
; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vsbc.vvm v8, v8, v10, v0
; VLOPT-NEXT: vadd.vv v8, v8, v12
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.nxv4i1(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, iXLen -1)
@@ -5434,9 +5428,8 @@ define <vscale x 4 x i32> @vsbc_vxm(<vscale x 4 x i32> %a, <vscale x 4 x i1> %ma
;
; VLOPT-LABEL: vsbc_vxm:
; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; VLOPT-NEXT: vsbc.vxm v8, v8, a0, v0
; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vsbc.vxm v8, v8, a0, v0
; VLOPT-NEXT: vadd.vv v8, v8, v10
; VLOPT-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.nxv4i1(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %c, <vscale x 4 x i1> %mask, iXLen -1)