[llvm] d99c999 - [RISCV][VLOPT] Add support for mask-register logical instructions and set mask instructions (#112231)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 12 09:24:56 PST 2024
Author: Michael Maitland
Date: 2024-12-12T12:24:52-05:00
New Revision: d99c9994db5e051dc4b71c7bce6e56f8c9c72c1a
URL: https://github.com/llvm/llvm-project/commit/d99c9994db5e051dc4b71c7bce6e56f8c9c72c1a
DIFF: https://github.com/llvm/llvm-project/commit/d99c9994db5e051dc4b71c7bce6e56f8c9c72c1a.diff
LOG: [RISCV][VLOPT] Add support for mask-register logical instructions and set mask instructions (#112231)
We need to adjust getEMULEqualsEEWDivSEWTimesLMUL to account for the
fact that the SEW operand (Log2SEW) of mask instructions is 0, but their
EMUL is calculated as if Log2SEW were 3 (e8).
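To illustrate the arithmetic (this worked example is mine, not part of
the commit): a mask operand has EEW=1, so for the mask operands of a
vmand.mm executed at LMUL=1, EMUL = (EEW/SEW)*LMUL = (1/8)*1 = 1/8 once
SEW is treated as e8. Using the encoded SEW=1 instead would give
EMUL = (1/1)*1 = 1, overstating how much of a vector register the mask
occupies.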
Added:
Modified:
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 0df4c451894bec..1d5684d6038ea9 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -159,6 +159,12 @@ getEMULEqualsEEWDivSEWTimesLMUL(unsigned Log2EEW, const MachineInstr &MI) {
auto [MILMUL, MILMULIsFractional] = RISCVVType::decodeVLMUL(MIVLMUL);
unsigned MILog2SEW =
MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
+
+ // Mask instructions will have 0 as the SEW operand. But the LMUL of these
+ // instructions is calculated as if the SEW operand was 3 (e8).
+ if (MILog2SEW == 0)
+ MILog2SEW = 3;
+
unsigned MISEW = 1 << MILog2SEW;
unsigned EEW = 1 << Log2EEW;
@@ -492,6 +498,29 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
return OperandInfo(EMUL, Log2EEW);
}
+ // Vector Mask Instructions
+ // Vector Mask-Register Logical Instructions
+ // vmsbf.m set-before-first mask bit
+ // vmsif.m set-including-first mask bit
+ // vmsof.m set-only-first mask bit
+ // EEW=1 and EMUL=(EEW/SEW)*LMUL
+ // We handle the case where the operand is a v0 mask operand above the switch,
+ // but these instructions may use non-v0 mask operands and need to be handled
+ // specifically.
+ case RISCV::VMAND_MM:
+ case RISCV::VMNAND_MM:
+ case RISCV::VMANDN_MM:
+ case RISCV::VMXOR_MM:
+ case RISCV::VMOR_MM:
+ case RISCV::VMNOR_MM:
+ case RISCV::VMORN_MM:
+ case RISCV::VMXNOR_MM:
+ case RISCV::VMSBF_M:
+ case RISCV::VMSIF_M:
+ case RISCV::VMSOF_M: {
+ return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(0, MI), 0);
+ }
+
default:
return {};
}
@@ -632,6 +661,23 @@ static bool isSupportedInstr(const MachineInstr &MI) {
// Vector Crypto
case RISCV::VWSLL_VI:
+
+ // Vector Mask Instructions
+ // Vector Mask-Register Logical Instructions
+ // vmsbf.m set-before-first mask bit
+ // vmsif.m set-including-first mask bit
+ // vmsof.m set-only-first mask bit
+ case RISCV::VMAND_MM:
+ case RISCV::VMNAND_MM:
+ case RISCV::VMANDN_MM:
+ case RISCV::VMXOR_MM:
+ case RISCV::VMOR_MM:
+ case RISCV::VMNOR_MM:
+ case RISCV::VMORN_MM:
+ case RISCV::VMXNOR_MM:
+ case RISCV::VMSBF_M:
+ case RISCV::VMSIF_M:
+ case RISCV::VMSOF_M:
return true;
}
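For readers who want to poke at the formula outside of LLVM, here is a
minimal standalone sketch of the computation above. The function name,
the plain-integer parameters in place of the MachineInstr queries, and
the numerator/denominator return pair are all my own simplifications;
the in-tree helper differs.

  #include <utility>

  // EMUL = (EEW / SEW) * LMUL, returned as a reduced power-of-two
  // fraction {Num, Denom}. MILog2SEW == 0 marks a mask instruction,
  // whose EMUL is computed as if the SEW operand were e8 (Log2SEW = 3).
  std::pair<unsigned, unsigned>
  emulEqualsEEWDivSEWTimesLMUL(unsigned Log2EEW, unsigned MILog2SEW,
                               unsigned MILMUL, bool MILMULIsFractional) {
    if (MILog2SEW == 0)
      MILog2SEW = 3; // Mask instruction: treat the SEW operand as e8.
    unsigned MISEW = 1u << MILog2SEW;
    unsigned EEW = 1u << Log2EEW;
    // Fold LMUL into the numerator or denominator depending on whether
    // it is fractional (mf8..mf2) or integral (m1..m8).
    unsigned Num = MILMULIsFractional ? EEW : EEW * MILMUL;
    unsigned Denom = MILMULIsFractional ? MISEW * MILMUL : MISEW;
    // Reduce the fraction; both sides are powers of two.
    while (Num % 2 == 0 && Denom % 2 == 0) {
      Num /= 2;
      Denom /= 2;
    }
    return {Num, Denom};
  }

With the mask adjustment, emulEqualsEEWDivSEWTimesLMUL(0, 0, 1, false)
yields {1, 8}, i.e. EMUL = 1/8 for the mask operand of a vmand.mm at
LMUL=1, matching the EEW=1, SEW=e8 reasoning in the comment above.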
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index e13482d23a26f4..a21e3df85193fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -2159,3 +2159,296 @@ define <vscale x 1 x i8> @vmerge_vvm(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x
%3 = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %2, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, iXLen %vl)
ret <vscale x 1 x i8> %3
}
+
+define <vscale x 1 x i32> @vmand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmand_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmand_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmand.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmnand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmnand_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmnand.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmnand_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmnand.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmandn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmandn_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmandn.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmandn_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmandn.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmxor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmxor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmxor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmxor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmxor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmnor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmnor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmnor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmnor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmorn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmorn_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmorn.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmorn_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmorn.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmxnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmxnor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmxnor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmxnor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmxnor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmsbf_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsbf_m:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmsbf.m v9, v0
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsbf_m:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmsbf.m v9, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmsif_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsif_m:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmsif.m v9, v0
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsif_m:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmsif.m v9, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
+define <vscale x 1 x i32> @vmsof_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vmsof_m:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmsof.m v9, v0
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsof_m:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmsof.m v9, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 814894f4acea3c..f1e7bb446482e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -542,4 +542,64 @@ body: |
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
PseudoVSSE8_V_MF2 %x, $noreg, $noreg, 1, 3 /* e8 */
...
+---
+name: vmop_mm
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmop_mm
+ ; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0 /* e8 */
+ %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+ %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 0
+...
+---
+name: vmop_mm_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmop_mm_incompatible_eew
+ ; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+ %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vmop_mm_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmop_mm_incompatible_emul
+ ; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0 /* e8 */
+ %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+ %y:vr = PseudoVMAND_MM_MF2 $noreg, %x, 1, 0
+...
+---
+name: vmop_mm_mask
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmop_mm_mask
+ ; CHECK: %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+ %y:vrnov0 = PseudoVADD_VV_M1_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vmop_mm_mask_larger_emul_user
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmop_mm_mask_larger_emul_user
+ ; CHECK: %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+ %y:vrm2nov0 = PseudoVADD_VV_M2_MASK $noreg, $noreg, $noreg, %x, 1, 4 /* e16 */, 0
+...
+---
+name: vmop_mm_mask_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vmop_mm_mask_incompatible_emul
+ ; CHECK: %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0 /* e8 */
+ ; CHECK-NEXT: %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vmv0 = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 0
+ %y:vrnov0 = PseudoVADD_VV_MF2_MASK $noreg, $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
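A note on the arithmetic behind the last two MIR cases (my numbers, not
from the patch): the producer PseudoVMAND_MM_M1 defines a mask with
EMUL = (1/8)*1 = 1/8. In vmop_mm_mask_larger_emul_user, the consumer
PseudoVADD_VV_M2_MASK runs at e16/m2, so its v0 operand also has
EMUL = (1/16)*2 = 1/8; the EMULs match and the producer's VL is reduced
to 1. In vmop_mm_mask_incompatible_emul, PseudoVADD_VV_MF2_MASK runs at
e8/mf2, giving EMUL = (1/8)*(1/2) = 1/16, which does not match, so the
producer keeps VL=-1.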