[llvm] [RISCV][VLOPT] Add vector mask producing integer instructions to isSupportedInstr and getOperandInfo (PR #119733)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 12 11:05:14 PST 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/119733
From cad30916176e9cae7ac47fa4a3e0c3b4e5d932d3 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 11 Dec 2024 10:50:13 -0800
Subject: [PATCH 1/3] [RISCV][VLOPT] Add vector integer cmp instructions to
isSupportedInstr and getOperandInfo
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 47 ++-
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 399 ++++++++++++++++++
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 81 +++-
3 files changed, 525 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 1d5684d6038ea9..e3b21ec05171e8 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -521,6 +521,32 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(0, MI), 0);
}
+ // Vector Integer Compare Instructions
+ // Dest EEW=1 and EMUL=(EEW/SEW)*LMUL. Source EEW=SEW and EMUL=LMUL.
+ case RISCV::VMSEQ_VI:
+ case RISCV::VMSEQ_VV:
+ case RISCV::VMSEQ_VX:
+ case RISCV::VMSNE_VI:
+ case RISCV::VMSNE_VV:
+ case RISCV::VMSNE_VX:
+ case RISCV::VMSLTU_VV:
+ case RISCV::VMSLTU_VX:
+ case RISCV::VMSLT_VV:
+ case RISCV::VMSLT_VX:
+ case RISCV::VMSLEU_VV:
+ case RISCV::VMSLEU_VI:
+ case RISCV::VMSLEU_VX:
+ case RISCV::VMSLE_VV:
+ case RISCV::VMSLE_VI:
+ case RISCV::VMSLE_VX:
+ case RISCV::VMSGTU_VI:
+ case RISCV::VMSGTU_VX:
+ case RISCV::VMSGT_VI:
+ case RISCV::VMSGT_VX:
+ if (IsMODef)
+ return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(0, MI), 0);
+ return OperandInfo(MIVLMul, MILog2SEW);
+
default:
return {};
}
@@ -599,7 +625,26 @@ static bool isSupportedInstr(const MachineInstr &MI) {
case RISCV::VNSRA_WV:
case RISCV::VNSRA_WX:
// Vector Integer Compare Instructions
- // FIXME: Add support
+ case RISCV::VMSEQ_VI:
+ case RISCV::VMSEQ_VV:
+ case RISCV::VMSEQ_VX:
+ case RISCV::VMSNE_VI:
+ case RISCV::VMSNE_VV:
+ case RISCV::VMSNE_VX:
+ case RISCV::VMSLTU_VV:
+ case RISCV::VMSLTU_VX:
+ case RISCV::VMSLT_VV:
+ case RISCV::VMSLT_VX:
+ case RISCV::VMSLEU_VV:
+ case RISCV::VMSLEU_VI:
+ case RISCV::VMSLEU_VX:
+ case RISCV::VMSLE_VV:
+ case RISCV::VMSLE_VI:
+ case RISCV::VMSLE_VX:
+ case RISCV::VMSGTU_VI:
+ case RISCV::VMSGTU_VX:
+ case RISCV::VMSGT_VI:
+ case RISCV::VMSGT_VX:
// Vector Integer Min/Max Instructions
case RISCV::VMINU_VV:
case RISCV::VMINU_VX:
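
To make the operand-info rule above concrete: the mask destination of these compares has EEW=1 and EMUL=(EEW/SEW)*LMUL (what getEMULEqualsEEWDivSEWTimesLMUL(0, MI) encodes), while the vector sources keep EEW=SEW and EMUL=LMUL. The standalone sketch below works that ratio out for the e32/m2 configuration used in the tests; the emulRatio helper and the fraction representation are illustrative only, not the in-tree API.

  #include <cassert>
  #include <numeric>
  #include <utility>

  // Sketch: EMUL = (EEW / SEW) * LMUL, kept as a reduced fraction so that
  // fractional LMULs (mf2, mf4, ...) and sub-1 results are representable.
  static std::pair<unsigned, unsigned> emulRatio(unsigned EEW, unsigned SEW,
                                                 unsigned LMulNum,
                                                 unsigned LMulDen) {
    unsigned Num = EEW * LMulNum;
    unsigned Den = SEW * LMulDen;
    unsigned G = std::gcd(Num, Den);
    return {Num / G, Den / G};
  }

  int main() {
    // vmseq.vv at SEW=32, LMUL=2 (e32, m2): the mask result has EEW=1 and
    // EMUL = (1/32) * 2 = 1/16; the vector sources stay at EEW=32, EMUL=2.
    auto [Num, Den] = emulRatio(/*EEW=*/1, /*SEW=*/32, /*LMulNum=*/2,
                                /*LMulDen=*/1);
    assert(Num == 1 && Den == 16);
    return 0;
  }
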
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index a21e3df85193fb..23508ad2ef51a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -1082,6 +1082,405 @@ define <vscale x 4 x i16> @vnsra_wv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b
ret <vscale x 4 x i16> %2
}
+define <vscale x 4 x i1> @vmseq_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmseq_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmseq.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmseq_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmseq.vi v10, v8, 5
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmseq_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmseq_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmseq.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmseq_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmseq.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmseq_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmseq_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmseq.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmseq_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmseq.vv v12, v8, v10
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsne_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsne_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsne.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsne_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsne.vi v10, v8, 5
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsne_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsne_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsne.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsne_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmsne.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsne_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsne_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsne.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsne_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsne.vv v12, v8, v10
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsltu_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsltu_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsltu.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsltu_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmsltu.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsltu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsltu_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsltu.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsltu_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsltu.vv v12, v8, v10
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmslt_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmslt_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmslt.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmslt_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmslt.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmslt_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmslt_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmslt.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmslt_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmslt.vv v12, v8, v10
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsleu_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsleu_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsleu.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsleu_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsleu.vi v10, v8, 5
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsleu_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsleu_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsleu.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsleu_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmsleu.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsleu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsleu_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsleu.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsleu_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsleu.vv v12, v8, v10
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsle_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsle_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsle.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsle_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsle.vi v10, v8, 5
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsle_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsle_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsle.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsle_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmsle.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsle_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsle_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsle.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsle_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsle.vv v12, v8, v10
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsgtu_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsgtu_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsgtu.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsgtu_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsgtu.vi v10, v8, 5
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsgtu_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsgtu_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsgtu.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsgtu_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmsgtu.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsgt_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmsgt_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsgt.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsgt_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsgt.vi v10, v8, 5
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsgt_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsgt_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsgt.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsgt_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmsgt.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
define <vscale x 4 x i32> @vminu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
; NOVLOPT-LABEL: vminu_vv:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index f1e7bb446482e1..165f1ebfa5aa7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -602,4 +602,83 @@ body: |
%x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
%y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
...
-
+---
+name: vmcmp_vv
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmcmp_vv
+ ; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0 /* e8 */
+ %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
+ %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0
+...
+---
+name: vmcmp_vv_maskuser
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmcmp_vv_maskuser
+ ; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
+ %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vmcmp_vv_maskuser_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmcmp_vv_maskuser_incompatible_eew
+ ; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
+ %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0
+...
+---
+name: vmcmp_vv_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmcmp_vv_incompatible_emul
+ ; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0 /* e8 */
+ %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
+ %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0
+...
+---
+name: vmcmp_vv_maskuser_incompaible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmcmp_vv_maskuser_incompaible_emul
+ ; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
+ %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vmcmp_vv_maskuser_larger_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmcmp_vv_maskuser_larger_emul
+ ; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
+ %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vmcmp_vv_consumer_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmcmp_vv_consumer
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVMSEQ_VV_M1 $noreg, %x, 1, 0 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVMSEQ_VV_M1 $noreg, %x, 1, 0
+...
+---
+name: vmcmp_vv_consumer_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmcmp_vv_consumer_incompatible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVMSEQ_VV_MF2 $noreg, %x, 1, 0 /* e8 */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVMSEQ_VV_MF2 $noreg, %x, 1, 0
+...
From b9fbab701be9382053829a5da086521b6c3728a2 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 12 Dec 2024 10:55:55 -0800
Subject: [PATCH 2/3] fixup! include vector integer add-carry sub-borrow
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 27 ++++++++++++++++++-
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 16 +++++------
2 files changed, 34 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index e3b21ec05171e8..7f08bd2973e4b2 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -341,11 +341,17 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
case RISCV::VNMSUB_VV:
case RISCV::VNMSUB_VX:
// Vector Integer Merge Instructions
+ // Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
// EEW=SEW and EMUL=LMUL, except the mask operand has EEW=1 and EMUL=
// (EEW/SEW)*LMUL. Mask operand is handled before this switch.
case RISCV::VMERGE_VIM:
case RISCV::VMERGE_VVM:
case RISCV::VMERGE_VXM:
+ case RISCV::VADC_VIM:
+ case RISCV::VADC_VVM:
+ case RISCV::VADC_VXM:
+ case RISCV::VSBC_VVM:
+ case RISCV::VSBC_VXM:
// Vector Integer Move Instructions
// Vector Fixed-Point Arithmetic Instructions
// Vector Single-Width Saturating Add and Subtract
@@ -438,7 +444,11 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
case RISCV::VWMACC_VX:
case RISCV::VWMACCSU_VV:
case RISCV::VWMACCSU_VX:
- case RISCV::VWMACCUS_VX: {
+ case RISCV::VWMACCUS_VX:
+ // Vector Single-Width Fractional Multiply with Rounding and Saturation
+ // Destination EEW=2*SEW and EMUL=2*LMUL. Source EEW=SEW and EMUL=LMUL.
+ case RISCV::VSMUL_VV:
+ case RISCV::VSMUL_VX: {
unsigned Log2EEW = IsMODef ? MILog2SEW + 1 : MILog2SEW;
RISCVII::VLMUL EMUL =
IsMODef ? RISCVVType::twoTimesVLMUL(MIVLMul) : MIVLMul;
@@ -543,9 +553,24 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
case RISCV::VMSGTU_VX:
case RISCV::VMSGT_VI:
case RISCV::VMSGT_VX:
+ // Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+ // Dest EEW=1 and EMUL=(EEW/SEW)*LMUL. Source EEW=SEW and EMUL=LMUL. Mask
+ // source operand handled above this switch.
+ case RISCV::VMADC_VIM:
+ case RISCV::VMADC_VVM:
+ case RISCV::VMADC_VXM:
+ case RISCV::VMSBC_VVM:
+ case RISCV::VMSBC_VXM:
+ // Dest EEW=1 and EMUL=(EEW/SEW)*LMUL. Source EEW=SEW and EMUL=LMUL.
+ case RISCV::VMADC_VV:
+ case RISCV::VMADC_VI:
+ case RISCV::VMADC_VX:
+ case RISCV::VMSBC_VV:
+ case RISCV::VMSBC_VX: {
if (IsMODef)
return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(0, MI), 0);
return OperandInfo(MIVLMul, MILog2SEW);
+ }
default:
return {};
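
As a concrete illustration of the widening rule used by the hunk above (destination EEW=2*SEW and EMUL=2*LMUL, i.e. Log2EEW = Log2SEW + 1 with the LMUL doubled via twoTimesVLMUL; the follow-up fixup drops VSMUL from that case again), here is a small standalone sketch. The helper names and the fraction representation are illustrative, not the in-tree RISCVVType API.

  #include <cassert>
  #include <utility>

  // Sketch of the widening-destination rule: the result element width doubles
  // (Log2EEW = Log2SEW + 1) and so does the register-group size
  // (EMUL = 2*LMUL), with LMUL kept as a fraction so mf8 through m8 are all
  // representable.
  static unsigned widenLog2EEW(unsigned Log2SEW) { return Log2SEW + 1; }

  static std::pair<unsigned, unsigned> widenLMul(unsigned Num, unsigned Den) {
    return (Den % 2 == 0) ? std::make_pair(Num, Den / 2)
                          : std::make_pair(Num * 2, Den);
  }

  int main() {
    // vwmacc.vv at SEW=32 (log2 = 5), LMUL=2: the destination is EEW=64,
    // EMUL=4, while the narrow vector sources stay at EEW=32, EMUL=2.
    assert(widenLog2EEW(5) == 6);
    assert(widenLMul(2, 1) == std::make_pair(4u, 1u));
    // Fractional LMUL: mf2 (1/2) widens to m1 (1/1).
    assert(widenLMul(1, 2) == std::make_pair(1u, 1u));
    return 0;
  }
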
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 165f1ebfa5aa7e..6d236341ddf2a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -603,7 +603,7 @@ body: |
%y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
...
---
-name: vmcmp_vv
+name: vmop_vv
body: |
bb.0:
; CHECK-LABEL: name: vmcmp_vv
@@ -613,7 +613,7 @@ body: |
%y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0
...
---
-name: vmcmp_vv_maskuser
+name: vmop_vv_maskuser
body: |
bb.0:
; CHECK-LABEL: name: vmcmp_vv_maskuser
@@ -623,7 +623,7 @@ body: |
%y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
...
---
-name: vmcmp_vv_maskuser_incompatible_eew
+name: vmop_vv_maskuser_incompatible_eew
body: |
bb.0:
; CHECK-LABEL: name: vmcmp_vv_maskuser_incompatible_eew
@@ -633,7 +633,7 @@ body: |
%y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0
...
---
-name: vmcmp_vv_incompatible_emul
+name: vmop_vv_incompatible_emul
body: |
bb.0:
; CHECK-LABEL: name: vmcmp_vv_incompatible_emul
@@ -643,7 +643,7 @@ body: |
%y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0
...
---
-name: vmcmp_vv_maskuser_incompaible_emul
+name: vmop_vv_maskuser_incompaible_emul
body: |
bb.0:
; CHECK-LABEL: name: vmcmp_vv_maskuser_incompaible_emul
@@ -653,7 +653,7 @@ body: |
%y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
...
---
-name: vmcmp_vv_maskuser_larger_emul
+name: vmop_vv_maskuser_larger_emul
body: |
bb.0:
; CHECK-LABEL: name: vmcmp_vv_maskuser_larger_emul
@@ -663,7 +663,7 @@ body: |
%y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
...
---
-name: vmcmp_vv_consumer_incompatible_eew
+name: vmop_vv_consumer_incompatible_eew
body: |
bb.0:
; CHECK-LABEL: name: vmcmp_vv_consumer
@@ -673,7 +673,7 @@ body: |
%y:vr = PseudoVMSEQ_VV_M1 $noreg, %x, 1, 0
...
---
-name: vmcmp_vv_consumer_incompatible_emul
+name: vmop_vv_consumer_incompatible_emul
body: |
bb.0:
; CHECK-LABEL: name: vmcmp_vv_consumer_incompatible_emul
From b805561b8d0e7f11097dd68bbc6ff88a189d7279 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 12 Dec 2024 11:02:04 -0800
Subject: [PATCH 3/3] fixup! cleanup
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 11 +-
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 100 ++++++++++++++++++
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 16 +--
3 files changed, 114 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 7f08bd2973e4b2..fcddd6d66a35fc 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -444,11 +444,7 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
case RISCV::VWMACC_VX:
case RISCV::VWMACCSU_VV:
case RISCV::VWMACCSU_VX:
- case RISCV::VWMACCUS_VX:
- // Vector Single-Width Fractional Multiply with Rounding and Saturation
- // Destination EEW=2*SEW and EMUL=2*LMUL. Source EEW=SEW and EMUL=LMUL.
- case RISCV::VSMUL_VV:
- case RISCV::VSMUL_VX: {
+ case RISCV::VWMACCUS_VX: {
unsigned Log2EEW = IsMODef ? MILog2SEW + 1 : MILog2SEW;
RISCVII::VLMUL EMUL =
IsMODef ? RISCVVType::twoTimesVLMUL(MIVLMul) : MIVLMul;
@@ -642,6 +638,11 @@ static bool isSupportedInstr(const MachineInstr &MI) {
case RISCV::VSEXT_VF8:
// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
// FIXME: Add support
+ case RISCV::VMADC_VV:
+ case RISCV::VMADC_VI:
+ case RISCV::VMADC_VX:
+ case RISCV::VMSBC_VV:
+ case RISCV::VMSBC_VX:
// Vector Narrowing Integer Right Shift Instructions
case RISCV::VNSRL_WX:
case RISCV::VNSRL_WI:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 23508ad2ef51a2..35274c25da856a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -962,6 +962,106 @@ define <vscale x 4 x i64> @vzext_vf8(<vscale x 4 x i8> %a, <vscale x 4 x i64> %b
ret <vscale x 4 x i64> %2
}
+define <vscale x 4 x i1> @vmadc_vi(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmadc_vi:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmadc.vi v10, v8, 5
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmadc_vi:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmadc.vi v10, v8, 5
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 5, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmadc_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmadc_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmadc.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmadc_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmadc.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmadc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmadc_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmadc.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmadc_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmadc.vv v12, v8, v10
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsbc_vx(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, i32 %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsbc_vx:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsbc.vx v10, v8, a0
+; NOVLOPT-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v10, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsbc_vx:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT: vmsbc.vx v10, v8, a0
+; VLOPT-NEXT: vmand.mm v0, v10, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(<vscale x 4 x i32> %a, i32 %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
+define <vscale x 4 x i1> @vmsbc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i1> %b, <vscale x 4 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsbc_vv:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT: vmsbc.vv v12, v8, v10
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v12, v0
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsbc_vv:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT: vmsbc.vv v12, v8, v10
+; VLOPT-NEXT: vmand.mm v0, v12, v0
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %c, iXLen -1)
+ %2 = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(<vscale x 4 x i1> %1, <vscale x 4 x i1> %b, iXLen %vl)
+ ret <vscale x 4 x i1> %2
+}
+
define <vscale x 4 x i16> @vnsrl_wi(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, iXLen %vl) {
; NOVLOPT-LABEL: vnsrl_wi:
; NOVLOPT: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 6d236341ddf2a4..7f767171807358 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -606,7 +606,7 @@ body: |
name: vmop_vv
body: |
bb.0:
- ; CHECK-LABEL: name: vmcmp_vv
+ ; CHECK-LABEL: name: vmop_vv
; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0 /* e8 */
%x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -616,7 +616,7 @@ body: |
name: vmop_vv_maskuser
body: |
bb.0:
- ; CHECK-LABEL: name: vmcmp_vv_maskuser
+ ; CHECK-LABEL: name: vmop_vv_maskuser
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, 1, 0 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -626,7 +626,7 @@ body: |
name: vmop_vv_maskuser_incompatible_eew
body: |
bb.0:
- ; CHECK-LABEL: name: vmcmp_vv_maskuser_incompatible_eew
+ ; CHECK-LABEL: name: vmop_vv_maskuser_incompatible_eew
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -636,7 +636,7 @@ body: |
name: vmop_vv_incompatible_emul
body: |
bb.0:
- ; CHECK-LABEL: name: vmcmp_vv_incompatible_emul
+ ; CHECK-LABEL: name: vmop_vv_incompatible_emul
; CHECK: %x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0 /* e8 */
%x:vr = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -646,7 +646,7 @@ body: |
name: vmop_vv_maskuser_incompaible_emul
body: |
bb.0:
- ; CHECK-LABEL: name: vmcmp_vv_maskuser_incompaible_emul
+ ; CHECK-LABEL: name: vmop_vv_maskuser_incompaible_emul
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -656,7 +656,7 @@ body: |
name: vmop_vv_maskuser_larger_emul
body: |
bb.0:
- ; CHECK-LABEL: name: vmcmp_vv_maskuser_larger_emul
+ ; CHECK-LABEL: name: vmop_vv_maskuser_larger_emul
; CHECK: %x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0 /* e8 */
; CHECK-NEXT: %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
%x:vmv0 = PseudoVMSEQ_VV_M1 $noreg, $noreg, -1, 0
@@ -666,7 +666,7 @@ body: |
name: vmop_vv_consumer_incompatible_eew
body: |
bb.0:
- ; CHECK-LABEL: name: vmcmp_vv_consumer
+ ; CHECK-LABEL: name: vmop_vv_consumer_incompatible_eew
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMSEQ_VV_M1 $noreg, %x, 1, 0 /* e8 */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
@@ -676,7 +676,7 @@ body: |
name: vmop_vv_consumer_incompatible_emul
body: |
bb.0:
- ; CHECK-LABEL: name: vmcmp_vv_consumer_incompatible_emul
+ ; CHECK-LABEL: name: vmop_vv_consumer_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVMSEQ_VV_MF2 $noreg, %x, 1, 0 /* e8 */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0