[llvm] [RISCV][VLOPT] Add support for mask-register logical instructions and set mask instructions (PR #112231)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 15 07:23:59 PDT 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/112231
>From ff274e71160631bc147f965981eced9b347f579e Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 14 Oct 2024 09:22:26 -0700
Subject: [PATCH 1/4] [RISCV][VLOPT] Add support for mask-register logical
instructions and set mask instructions
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 38 ++++
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 223 +++++++++++++++++++
2 files changed, 261 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index eb1f4df4ff7264..739db612ef5849 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -475,6 +475,28 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
return OperandInfo(EMUL, Log2EEW);
}
+ // 15. Vector Mask Instructions
+ // 15.1. Vector Mask-Register Logical Instructions
+ // 15.4. vmsbf.m set-before-first mask bit
+ // 15.6. vmsof.m set-only-first mask bit
+ // EEW=1 and EMUL=(EEW/SEW)*LMUL
+ // We handle the cases when the operand is a v0 mask operand above the switch,
+ // but these instructions may use non-v0 mask operands and need to be handled
+ // specifically.
+ case RISCV::VMAND_MM:
+ case RISCV::VMNAND_MM:
+ case RISCV::VMANDN_MM:
+ case RISCV::VMXOR_MM:
+ case RISCV::VMOR_MM:
+ case RISCV::VMNOR_MM:
+ case RISCV::VMORN_MM:
+ case RISCV::VMXNOR_MM:
+ case RISCV::VMSBF_M:
+ case RISCV::VMSIF_M:
+ case RISCV::VMSOF_M: {
+ return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(0, MI), 0);
+ }
+
default:
return {};
}
@@ -565,6 +587,22 @@ static bool isSupportedInstr(const MachineInstr &MI) {
// Vector Crypto
case RISCV::VWSLL_VI:
+
+ // 15. Vector Mask Instructions
+ // 15.1. Vector Mask-Register Logical Instructions
+ // 15.4. vmsbf.m set-before-first mask bit
+ // 15.6. vmsof.m set-only-first mask bit
+ case RISCV::VMAND_MM:
+ case RISCV::VMNAND_MM:
+ case RISCV::VMANDN_MM:
+ case RISCV::VMXOR_MM:
+ case RISCV::VMOR_MM:
+ case RISCV::VMNOR_MM:
+ case RISCV::VMORN_MM:
+ case RISCV::VMXNOR_MM:
+ case RISCV::VMSBF_M:
+ case RISCV::VMSIF_M:
+ case RISCV::VMSOF_M:
return true;
}
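(Editorial aside, not part of the patch: with EEW fixed at 1 for mask operands, the
EMUL=(EEW/SEW)*LMUL rule in the comment above collapses to LMUL/SEW. A minimal
standalone sketch of that arithmetic follows; the helper name emulForMaskOperand and
the numerator/denominator representation are illustrative assumptions, not the
in-tree RISCVVType API.)

    // Illustrative only: models EMUL = (EEW / SEW) * LMUL with EEW = 1 (Log2EEW = 0),
    // keeping the result as an exact fraction so fractional LMULs stay representable.
    #include <cstdio>
    #include <numeric>
    #include <utility>

    // Returns EMUL as {numerator, denominator}, reduced to lowest terms.
    static std::pair<unsigned, unsigned> emulForMaskOperand(unsigned SEW,
                                                            unsigned LMULNum,
                                                            unsigned LMULDenom) {
      unsigned Num = LMULNum;            // EEW = 1, so the numerator is just LMULNum
      unsigned Denom = SEW * LMULDenom;  // divide by SEW
      unsigned G = std::gcd(Num, Denom);
      return {Num / G, Denom / G};
    }

    int main() {
      // e8, mf8 -- the configuration used by the tests below: EMUL = (1/8)*(1/8) = 1/64.
      auto [Num, Denom] = emulForMaskOperand(/*SEW=*/8, /*LMULNum=*/1, /*LMULDenom=*/8);
      std::printf("EMUL = %u/%u\n", Num, Denom);
      return 0;
    }

Because EEW is constant for these opcodes, a single call with Log2EEW = 0, as in the
case block added above, is enough.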
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 107252338829bd..7532cfdff2c148 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -971,3 +971,226 @@ define <vscale x 1 x i8> @vmerge_vvm(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x
%3 = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %2, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, iXLen %vl)
ret <vscale x 1 x i8> %3
}
+
+
+define <vscale x 1 x i1> @vmand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmand_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmand_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmand.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmnand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmnand_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmnand.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmnand_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmnand.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmandn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmandn_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmandn.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmandn_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmandn.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmxor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmxor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmxor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmxor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmxor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+
+define <vscale x 1 x i1> @vmnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmnor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmnor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmnor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmnor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmorn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmorn_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmorn.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmorn_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmorn.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmxnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmxnor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmxnor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmxnor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmxnor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmsbf_m(<vscale x 1 x i1> %a, iXLen %vl) {
+; NOVLOPT-LABEL: vmsbf_m:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmsbf.m v8, v0
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsbf_m:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmsbf.m v8, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmsif_m(<vscale x 1 x i1> %a, iXLen %vl) {
+; NOVLOPT-LABEL: vmsbf_m:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmsbf.m v8, v0
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsbf_m:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmsbf.m v8, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmsof_m(<vscale x 1 x i1> %a, iXLen %vl) {
+; NOVLOPT-LABEL: vmsbf_m:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmsbf.m v8, v0
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsbf_m:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmsbf.m v8, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
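(Editorial aside, not part of the patch: isSupportedInstr gates which producers may
have their VL reduced, while getOperandInfo supplies the (EMUL, EEW) pair that has to
line up between the producer's destination and each consumer's use. A rough model of
that compatibility check is sketched below; OperandInfoModel and usesAreCompatible are
invented names, and this is a simplification of the pass's reasoning, not its actual
data structures.)

    // Simplified model (assumed names, not RISCVVLOptimizer's real types): a
    // producer's VL may be narrowed to a consumer's VL only when every use agrees
    // on effective element width and register-group ratio.
    #include <vector>

    struct OperandInfoModel {
      unsigned EMULNum = 1, EMULDenom = 1; // EMUL as a fraction
      unsigned Log2EEW = 0;                // log2 of the effective element width
      bool operator==(const OperandInfoModel &O) const {
        return EMULNum == O.EMULNum && EMULDenom == O.EMULDenom &&
               Log2EEW == O.Log2EEW;
      }
    };

    // True if the producer's destination info matches every consumer's use info.
    static bool usesAreCompatible(const OperandInfoModel &Def,
                                  const std::vector<OperandInfoModel> &Uses) {
      for (const OperandInfoModel &U : Uses)
        if (!(Def == U))
          return false;
      return true;
    }

    int main() {
      // For the e8/mf8 mask tests above, both sides report Log2EEW = 0 and EMUL = 1/64.
      OperandInfoModel MaskInfo{1, 64, 0};
      std::vector<OperandInfoModel> Uses{MaskInfo, MaskInfo};
      return usesAreCompatible(MaskInfo, Uses) ? 0 : 1;
    }

Roughly speaking, a vmand.mm feeding another mask instruction at the same vtype is
compatible under this model, so the producer's VL can be shrunk to the consumer's,
which is the behavior the VLOPT check lines above demonstrate.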
>From 57c302db492c14e6cd8cfbebe369568958204dee Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 15 Oct 2024 07:13:37 -0700
Subject: [PATCH 2/4] fixup! add 15.5 and remove chapter numbers
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 739db612ef5849..392727ef1b4f23 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -475,10 +475,11 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
return OperandInfo(EMUL, Log2EEW);
}
- // 15. Vector Mask Instructions
- // 15.1. Vector Mask-Register Logical Instructions
- // 15.4. vmsbf.m set-before-first mask bit
- // 15.6. vmsof.m set-only-first mask bit
+ // Vector Mask Instructions
+ // Vector Mask-Register Logical Instructions
+ // vmsbf.m set-before-first mask bit
+ // vmsif.m set-including-first mask bit
+ // vmsof.m set-only-first mask bit
// EEW=1 and EMUL=(EEW/SEW)*LMUL
// We handle the cases when the operand is a v0 mask operand above the switch,
// but these instructions may use non-v0 mask operands and need to be handled
@@ -588,10 +589,11 @@ static bool isSupportedInstr(const MachineInstr &MI) {
// Vector Crypto
case RISCV::VWSLL_VI:
- // 15. Vector Mask Instructions
- // 15.1. Vector Mask-Register Logical Instructions
- // 15.4. vmsbf.m set-before-first mask bit
- // 15.6. vmsof.m set-only-first mask bit
+ // Vector Mask Instructions
+ // Vector Mask-Register Logical Instructions
+ // vmsbf.m set-before-first mask bit
+ // vmsif.m set-including-first mask bit
+ // vmsof.m set-only-first mask bit
case RISCV::VMAND_MM:
case RISCV::VMNAND_MM:
case RISCV::VMANDN_MM:
>From 65c38ca23c2a12e9b08770978475eb440a410946 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 15 Oct 2024 07:14:32 -0700
Subject: [PATCH 3/4] fixup! update test checks
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 7532cfdff2c148..5e29faf5fbfa89 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -1155,18 +1155,18 @@ define <vscale x 1 x i1> @vmsbf_m(<vscale x 1 x i1> %a, iXLen %vl) {
}
define <vscale x 1 x i1> @vmsif_m(<vscale x 1 x i1> %a, iXLen %vl) {
-; NOVLOPT-LABEL: vmsbf_m:
+; NOVLOPT-LABEL: vmsif_m:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; NOVLOPT-NEXT: vmsbf.m v8, v0
+; NOVLOPT-NEXT: vmsif.m v8, v0
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
; NOVLOPT-NEXT: ret
;
-; VLOPT-LABEL: vmsbf_m:
+; VLOPT-LABEL: vmsif_m:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; VLOPT-NEXT: vmsbf.m v8, v0
+; VLOPT-NEXT: vmsif.m v8, v0
; VLOPT-NEXT: vmand.mm v0, v0, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
@@ -1175,18 +1175,18 @@ define <vscale x 1 x i1> @vmsif_m(<vscale x 1 x i1> %a, iXLen %vl) {
}
define <vscale x 1 x i1> @vmsof_m(<vscale x 1 x i1> %a, iXLen %vl) {
-; NOVLOPT-LABEL: vmsbf_m:
+; NOVLOPT-LABEL: vmsof_m:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; NOVLOPT-NEXT: vmsbf.m v8, v0
+; NOVLOPT-NEXT: vmsof.m v8, v0
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
; NOVLOPT-NEXT: ret
;
-; VLOPT-LABEL: vmsbf_m:
+; VLOPT-LABEL: vmsof_m:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; VLOPT-NEXT: vmsbf.m v8, v0
+; VLOPT-NEXT: vmsof.m v8, v0
; VLOPT-NEXT: vmand.mm v0, v0, v8
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
>From 40179b32816a5803c6310e442c225aab209e443a Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 15 Oct 2024 07:23:43 -0700
Subject: [PATCH 4/4] fixup! test when last instruction consumes as mask
operand
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 140 ++++++++++++++-----
1 file changed, 105 insertions(+), 35 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 5e29faf5fbfa89..76afe772ded084 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -972,14 +972,16 @@ define <vscale x 1 x i8> @vmerge_vvm(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x
ret <vscale x 1 x i8> %3
}
-
-define <vscale x 1 x i1> @vmand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+define <vscale x 1 x i32> @vmand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmand_mm:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v8, v0, v8
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmand_mm:
@@ -987,19 +989,26 @@ define <vscale x 1 x i1> @vmand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; VLOPT-NEXT: vmand.mm v8, v0, v8
; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmnand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+define <vscale x 1 x i32> @vmnand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmnand_mm:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmnand.mm v8, v0, v8
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmnand_mm:
@@ -1007,19 +1016,26 @@ define <vscale x 1 x i1> @vmnand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b,
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; VLOPT-NEXT: vmnand.mm v8, v0, v8
; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmandn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+define <vscale x 1 x i32> @vmandn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmandn_mm:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmandn.mm v8, v0, v8
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmandn_mm:
@@ -1027,19 +1043,26 @@ define <vscale x 1 x i1> @vmandn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b,
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; VLOPT-NEXT: vmandn.mm v8, v0, v8
; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmxor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+define <vscale x 1 x i32> @vmxor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmxor_mm:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmxor.mm v8, v0, v8
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmxor_mm:
@@ -1047,19 +1070,26 @@ define <vscale x 1 x i1> @vmxor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; VLOPT-NEXT: vmxor.mm v8, v0, v8
; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+define <vscale x 1 x i32> @vmor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmor_mm:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmor.mm v8, v0, v8
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmor_mm:
@@ -1067,20 +1097,27 @@ define <vscale x 1 x i1> @vmor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iX
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; VLOPT-NEXT: vmor.mm v8, v0, v8
; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+define <vscale x 1 x i32> @vmnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmnor_mm:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmnor.mm v8, v0, v8
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmnor_mm:
@@ -1088,19 +1125,26 @@ define <vscale x 1 x i1> @vmnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; VLOPT-NEXT: vmnor.mm v8, v0, v8
; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmorn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+define <vscale x 1 x i32> @vmorn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmorn_mm:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmorn.mm v8, v0, v8
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmorn_mm:
@@ -1108,19 +1152,26 @@ define <vscale x 1 x i1> @vmorn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; VLOPT-NEXT: vmorn.mm v8, v0, v8
; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmxnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+define <vscale x 1 x i32> @vmxnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmxnor_mm:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmxnor.mm v8, v0, v8
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmv1r.v v8, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmxnor_mm:
@@ -1128,69 +1179,88 @@ define <vscale x 1 x i1> @vmxnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b,
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; VLOPT-NEXT: vmxnor.mm v8, v0, v8
; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmv1r.v v8, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v9, v9, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmsbf_m(<vscale x 1 x i1> %a, iXLen %vl) {
+define <vscale x 1 x i32> @vmsbf_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmsbf_m:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; NOVLOPT-NEXT: vmsbf.m v8, v0
+; NOVLOPT-NEXT: vmsbf.m v9, v0
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmand.mm v0, v0, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmsbf_m:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; VLOPT-NEXT: vmsbf.m v8, v0
-; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmsbf.m v9, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmsif_m(<vscale x 1 x i1> %a, iXLen %vl) {
+define <vscale x 1 x i32> @vmsif_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmsif_m:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; NOVLOPT-NEXT: vmsif.m v8, v0
+; NOVLOPT-NEXT: vmsif.m v9, v0
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmand.mm v0, v0, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmsif_m:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; VLOPT-NEXT: vmsif.m v8, v0
-; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmsif.m v9, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}
-define <vscale x 1 x i1> @vmsof_m(<vscale x 1 x i1> %a, iXLen %vl) {
+define <vscale x 1 x i32> @vmsof_m(<vscale x 1 x i1> %a, <vscale x 1 x i32> %c, iXLen %vl) {
; NOVLOPT-LABEL: vmsof_m:
; NOVLOPT: # %bb.0:
; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; NOVLOPT-NEXT: vmsof.m v8, v0
+; NOVLOPT-NEXT: vmsof.m v9, v0
; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: vmand.mm v0, v0, v9
+; NOVLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; NOVLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
; NOVLOPT-NEXT: ret
;
; VLOPT-LABEL: vmsof_m:
; VLOPT: # %bb.0:
; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; VLOPT-NEXT: vmsof.m v8, v0
-; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: vmsof.m v9, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v9
+; VLOPT-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; VLOPT-NEXT: vadd.vv v8, v8, v8, v0.t
; VLOPT-NEXT: ret
%1 = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
%2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
- ret <vscale x 1 x i1> %2
+ %3 = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i32> %c, <vscale x 1 x i1> %2, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %3
}