[llvm] 9980148 - [RISCV][SelectionDAG] Support VP_ADD/VP_MUL/VP_SUB mask operations

Lian Wang via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 25 19:34:21 PDT 2022


Author: Lian Wang
Date: 2022-04-26T02:30:22Z
New Revision: 998014830549b411b0f52f0fbf6c997a4faf3d8e

URL: https://github.com/llvm/llvm-project/commit/998014830549b411b0f52f0fbf6c997a4faf3d8e
DIFF: https://github.com/llvm/llvm-project/commit/998014830549b411b0f52f0fbf6c997a4faf3d8e.diff

LOG: [RISCV][SelectionDAG] Support VP_ADD/VP_MUL/VP_SUB mask operations

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D124144
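
For an i1 element type the arithmetic is carried out modulo 2, so addition
and subtraction are both equivalent to exclusive-or and multiplication is
equivalent to logical and. The change below canonicalizes the VP opcodes
accordingly when the node is built, before it is memoized. A minimal sketch
(not part of this commit; DAG, Ctx, and the operand names are illustrative)
of the effect at the SelectionDAG::getNode level:

    // Assume DAG, DL, LHS, RHS, Mask and EVL are already set up for a
    // <vscale x 2 x i1> operation.
    EVT MaskVT = EVT::getVectorVT(Ctx, MVT::i1, ElementCount::getScalable(2));
    SDValue Sum = DAG.getNode(ISD::VP_ADD, DL, MaskVT, {LHS, RHS, Mask, EVL});
    // With this patch, Sum.getOpcode() is ISD::VP_XOR; a VP_MUL request
    // would come back as ISD::VP_AND. On RISC-V these then select to
    // vmxor.mm / vmand.mm, as the new tests check.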

Added: 
    llvm/test/CodeGen/RISCV/fixed-vectors-vadd-vp-mask.ll
    llvm/test/CodeGen/RISCV/fixed-vectors-vmul-vp-mask.ll
    llvm/test/CodeGen/RISCV/fixed-vectors-vsub-vp-mask.ll
    llvm/test/CodeGen/RISCV/vadd-vp-mask.ll
    llvm/test/CodeGen/RISCV/vmul-vp-mask.ll
    llvm/test/CodeGen/RISCV/vsub-vp-mask.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index f8d739dde6397..69e100c2366a6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8818,6 +8818,17 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
            "LHS/RHS of comparison should match types!");
     break;
+  case ISD::VP_ADD:
+  case ISD::VP_SUB:
+    // If this is a VP_ADD/VP_SUB mask operation, turn it into VP_XOR.
+    if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
+      Opcode = ISD::VP_XOR;
+    break;
+  case ISD::VP_MUL:
+    // If this is a VP_MUL mask operation, turn it into VP_AND.
+    if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
+      Opcode = ISD::VP_AND;
+    break;
   }
 
   // Memoize nodes.

diff --git a/llvm/test/CodeGen/RISCV/fixed-vectors-vadd-vp-mask.ll b/llvm/test/CodeGen/RISCV/fixed-vectors-vadd-vp-mask.ll
new file mode 100644
index 0000000000000..9787fa8a2a9e3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/fixed-vectors-vadd-vp-mask.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+
+declare <2 x i1> @llvm.vp.add.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32)
+
+define <2 x i1> @vadd_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_v2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <2 x i1> @llvm.vp.add.v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 %evl)
+  ret <2 x i1> %v
+}
+
+declare <4 x i1> @llvm.vp.add.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32)
+
+define <4 x i1> @vadd_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x i1> @llvm.vp.add.v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 %evl)
+  ret <4 x i1> %v
+}
+
+declare <8 x i1> @llvm.vp.add.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32)
+
+define <8 x i1> @vadd_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_v8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <8 x i1> @llvm.vp.add.v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i1> %v
+}
+
+declare <16 x i1> @llvm.vp.add.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32)
+
+define <16 x i1> @vadd_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_v16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <16 x i1> @llvm.vp.add.v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 %evl)
+  ret <16 x i1> %v
+}
+
+declare <32 x i1> @llvm.vp.add.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32)
+
+define <32 x i1> @vadd_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_v32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <32 x i1> @llvm.vp.add.v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 %evl)
+  ret <32 x i1> %v
+}
+
+declare <64 x i1> @llvm.vp.add.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32)
+
+define <64 x i1> @vadd_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_v64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <64 x i1> @llvm.vp.add.v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 %evl)
+  ret <64 x i1> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/fixed-vectors-vmul-vp-mask.ll b/llvm/test/CodeGen/RISCV/fixed-vectors-vmul-vp-mask.ll
new file mode 100644
index 0000000000000..8a7d6e65091b9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/fixed-vectors-vmul-vp-mask.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+
+declare <2 x i1> @llvm.vp.mul.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32)
+
+define <2 x i1> @vmul_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_v2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <2 x i1> @llvm.vp.mul.v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 %evl)
+  ret <2 x i1> %v
+}
+
+declare <4 x i1> @llvm.vp.mul.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32)
+
+define <4 x i1> @vmul_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x i1> @llvm.vp.mul.v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 %evl)
+  ret <4 x i1> %v
+}
+
+declare <8 x i1> @llvm.vp.mul.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32)
+
+define <8 x i1> @vmul_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_v8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <8 x i1> @llvm.vp.mul.v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i1> %v
+}
+
+declare <16 x i1> @llvm.vp.mul.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32)
+
+define <16 x i1> @vmul_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_v16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <16 x i1> @llvm.vp.mul.v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 %evl)
+  ret <16 x i1> %v
+}
+
+declare <32 x i1> @llvm.vp.mul.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32)
+
+define <32 x i1> @vmul_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_v32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <32 x i1> @llvm.vp.mul.v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 %evl)
+  ret <32 x i1> %v
+}
+
+declare <64 x i1> @llvm.vp.mul.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32)
+
+define <64 x i1> @vmul_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_v64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <64 x i1> @llvm.vp.mul.v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 %evl)
+  ret <64 x i1> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/fixed-vectors-vsub-vp-mask.ll b/llvm/test/CodeGen/RISCV/fixed-vectors-vsub-vp-mask.ll
new file mode 100644
index 0000000000000..9dea3b6e9a884
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/fixed-vectors-vsub-vp-mask.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+
+declare <2 x i1> @llvm.vp.sub.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32)
+
+define <2 x i1> @vsub_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_v2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <2 x i1> @llvm.vp.sub.v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 %evl)
+  ret <2 x i1> %v
+}
+
+declare <4 x i1> @llvm.vp.sub.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32)
+
+define <4 x i1> @vsub_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x i1> @llvm.vp.sub.v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 %evl)
+  ret <4 x i1> %v
+}
+
+declare <8 x i1> @llvm.vp.sub.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32)
+
+define <8 x i1> @vsub_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_v8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <8 x i1> @llvm.vp.sub.v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 %evl)
+  ret <8 x i1> %v
+}
+
+declare <16 x i1> @llvm.vp.sub.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32)
+
+define <16 x i1> @vsub_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_v16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <16 x i1> @llvm.vp.sub.v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 %evl)
+  ret <16 x i1> %v
+}
+
+declare <32 x i1> @llvm.vp.sub.v32i1(<32 x i1>, <32 x i1>, <32 x i1>, i32)
+
+define <32 x i1> @vsub_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_v32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <32 x i1> @llvm.vp.sub.v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 %evl)
+  ret <32 x i1> %v
+}
+
+declare <64 x i1> @llvm.vp.sub.v64i1(<64 x i1>, <64 x i1>, <64 x i1>, i32)
+
+define <64 x i1> @vsub_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_v64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <64 x i1> @llvm.vp.sub.v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 %evl)
+  ret <64 x i1> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/vadd-vp-mask.ll b/llvm/test/CodeGen/RISCV/vadd-vp-mask.ll
new file mode 100644
index 0000000000000..568e29f400624
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/vadd-vp-mask.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+
+
+declare <vscale x 2 x i1> @llvm.vp.add.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i1> @vadd_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i1> @llvm.vp.add.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i1> %v
+}
+
+declare <vscale x 4 x i1> @llvm.vp.add.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i1> @vadd_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i1> @llvm.vp.add.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i1> %v
+}
+
+declare <vscale x 8 x i1> @llvm.vp.add.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i1> @vadd_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x i1> @llvm.vp.add.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i1> %v
+}
+
+declare <vscale x 16 x i1> @llvm.vp.add.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i1> @vadd_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x i1> @llvm.vp.add.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i1> %v
+}
+
+declare <vscale x 32 x i1> @llvm.vp.add.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i1> @vadd_vv_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i1> @llvm.vp.add.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i1> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/vmul-vp-mask.ll b/llvm/test/CodeGen/RISCV/vmul-vp-mask.ll
new file mode 100644
index 0000000000000..6e11b26afa995
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/vmul-vp-mask.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+
+
+declare <vscale x 2 x i1> @llvm.vp.mul.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i1> @vmul_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i1> @llvm.vp.mul.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i1> %v
+}
+
+declare <vscale x 4 x i1> @llvm.vp.mul.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i1> @vmul_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i1> @llvm.vp.mul.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i1> %v
+}
+
+declare <vscale x 8 x i1> @llvm.vp.mul.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i1> @vmul_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x i1> @llvm.vp.mul.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i1> %v
+}
+
+declare <vscale x 16 x i1> @llvm.vp.mul.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i1> @vmul_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x i1> @llvm.vp.mul.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i1> %v
+}
+
+declare <vscale x 32 x i1> @llvm.vp.mul.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i1> @vmul_vv_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i1> @llvm.vp.mul.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i1> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/vsub-vp-mask.ll b/llvm/test/CodeGen/RISCV/vsub-vp-mask.ll
new file mode 100644
index 0000000000000..405691009ab96
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/vsub-vp-mask.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+
+
+declare <vscale x 2 x i1> @llvm.vp.sub.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i1> @vsub_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i1> @llvm.vp.sub.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %b, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i1> %v
+}
+
+declare <vscale x 4 x i1> @llvm.vp.sub.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i1> @vsub_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 4 x i1> @llvm.vp.sub.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %b, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i1> %v
+}
+
+declare <vscale x 8 x i1> @llvm.vp.sub.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i1> @vsub_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 8 x i1> @llvm.vp.sub.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %b, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i1> %v
+}
+
+declare <vscale x 16 x i1> @llvm.vp.sub.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i1> @vsub_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 16 x i1> @llvm.vp.sub.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %b, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i1> %v
+}
+
+declare <vscale x 32 x i1> @llvm.vp.sub.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i1> @vsub_vv_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i1> @llvm.vp.sub.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %b, <vscale x 32 x i1> %m, i32 %evl)
+  ret <vscale x 32 x i1> %v
+}


More information about the llvm-commits mailing list