[llvm-branch-commits] [llvm] 351c216 - [RISCV] Define vector mask-register logical intrinsics.

Zakk Chen via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Thu Dec 24 19:22:00 PST 2020


Author: Zakk Chen
Date: 2020-12-24T18:59:05-08:00
New Revision: 351c216f36afab3bb88eb74995a39940b85e3812

URL: https://github.com/llvm/llvm-project/commit/351c216f36afab3bb88eb74995a39940b85e3812
DIFF: https://github.com/llvm/llvm-project/commit/351c216f36afab3bb88eb74995a39940b85e3812.diff

LOG: [RISCV] Define vector mask-register logical intrinsics.

Define vector mask-register logical intrinsics and lower them
to V instructions. Also define pseudo instructions vmmv.m
and vmnot.m.

This patch was developed in collaboration with @rogfer01 from BSC.

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen at sifive.com>

Differential Revision: https://reviews.llvm.org/D93705
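
As a usage sketch (a minimal example mirroring the tests added below, assuming
RV32 so the VL operand is i32; nxv8i1 corresponds to vbool8_t, and the function
name is illustrative):

declare <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
  <vscale x 8 x i1>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i1> @mask_and(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 %vl) nounwind {
  ; Lowers to a vsetvli with e8,m1 followed by a single vmand.mm.
  %m = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
    <vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 %vl)
  ret <vscale x 8 x i1> %m
}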

Added: 
    llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index cb335e739266..6778b20ac0a8 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -189,6 +189,12 @@ let TargetPrefix = "riscv" in {
                      LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // The destination vector type is the same as the first and second source vectors.
+  // Input: (vector_in, vector_in, vl)
+  class RISCVBinaryAAANoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // The destination vector type is the same as the first source vector.
   // Input: (vector_in, vector_in/scalar_in, vl)
   class RISCVBinaryAAXNoMask
@@ -643,4 +649,13 @@ let TargetPrefix = "riscv" in {
   defm vfredsum : RISCVReduction;
   defm vfredmin : RISCVReduction;
   defm vfredmax : RISCVReduction;
+
+  def int_riscv_vmand: RISCVBinaryAAANoMask;
+  def int_riscv_vmnand: RISCVBinaryAAANoMask;
+  def int_riscv_vmandnot: RISCVBinaryAAANoMask;
+  def int_riscv_vmxor: RISCVBinaryAAANoMask;
+  def int_riscv_vmor: RISCVBinaryAAANoMask;
+  def int_riscv_vmnor: RISCVBinaryAAANoMask;
+  def int_riscv_vmornot: RISCVBinaryAAANoMask;
+  def int_riscv_vmxnor: RISCVBinaryAAANoMask;
 } // TargetPrefix = "riscv"
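
The RISCVBinaryAAANoMask class above gives all eight mask intrinsics the same
shape: the result and both sources share a single mask vector type, followed by
an XLEN-sized VL operand, with no mask or merge operand. A sketch of one
declaration it yields (i32 VL for RV32; the RV64 tests below use i64):

declare <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  i32)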

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 713a289badc2..c23c650973b3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -188,6 +188,25 @@ class GetIntVTypeInfo<VTypeInfo vti>
   VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
 }
 
+class MTypeInfo<ValueType Mas, LMULInfo M> {
+  ValueType Mask = Mas;
+  // {SEW, VLMul} values set a valid VType to handle this mask type.
+  // We assume SEW=8 and set the corresponding LMUL.
+  int SEW = 8;
+  LMULInfo LMul = M;
+}
+
+defset list<MTypeInfo> AllMasks = {
+  // vbool<n>_t, <n> = SEW/LMUL; we assume SEW=8 and set the corresponding LMUL.
+  def : MTypeInfo<vbool64_t, V_MF8>;
+  def : MTypeInfo<vbool32_t, V_MF4>;
+  def : MTypeInfo<vbool16_t, V_MF2>;
+  def : MTypeInfo<vbool8_t, V_M1>;
+  def : MTypeInfo<vbool4_t, V_M2>;
+  def : MTypeInfo<vbool2_t, V_M4>;
+  def : MTypeInfo<vbool1_t, V_M8>;
+}
+
 class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
 {
   VTypeInfo Vti = vti;
@@ -697,6 +716,13 @@ multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
     defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
 }
 
+multiclass VPseudoBinaryM_MM {
+  foreach m = MxList.m in
+    let VLMul = m.value in {
+      def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">;
+    }
+}
+
// We use earlyclobber here due to
// * The destination EEW is smaller than the source EEW and the overlap
//   in the lowest-numbered part of the source register group is legal.
@@ -1297,6 +1323,13 @@ multiclass VPatBinaryV_VI<string intrinsic, string instruction,
                       vti.RegClass, imm_type>;
 }
 
+multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
+  foreach mti = AllMasks in
+    def : VPatBinaryNoMask<intrinsic, instruction, "MM",
+                           mti.Mask, mti.Mask, mti.Mask,
+                           mti.SEW, mti.LMul, VR, VR>;
+}
+
 multiclass VPatBinaryW_VV<string intrinsic, string instruction,
                           list<VTypeInfoToWide> vtilist> {
   foreach VtiToWti = vtilist in {
@@ -2053,6 +2086,27 @@ defm PseudoVFREDMIN    : VPseudoReductionV_VS;
 defm PseudoVFREDMAX    : VPseudoReductionV_VS;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
+//===----------------------------------------------------------------------===//
+// 16. Vector Mask Instructions
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 16.1. Vector Mask-Register Logical Instructions
+//===----------------------------------------------------------------------===//
+
+defm PseudoVMAND: VPseudoBinaryM_MM;
+defm PseudoVMNAND: VPseudoBinaryM_MM;
+defm PseudoVMANDNOT: VPseudoBinaryM_MM;
+defm PseudoVMXOR: VPseudoBinaryM_MM;
+defm PseudoVMOR: VPseudoBinaryM_MM;
+defm PseudoVMNOR: VPseudoBinaryM_MM;
+defm PseudoVMORNOT: VPseudoBinaryM_MM;
+defm PseudoVMXNOR: VPseudoBinaryM_MM;
+
+//===----------------------------------------------------------------------===//
+// 17. Vector Permutation Instructions
+//===----------------------------------------------------------------------===//
+
 //===----------------------------------------------------------------------===//
 // 17.1. Integer Scalar Move Instructions
 //===----------------------------------------------------------------------===//
@@ -2512,6 +2566,24 @@ defm "" : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/
 defm "" : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
+//===----------------------------------------------------------------------===//
+// 16. Vector Mask Instructions
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 16.1. Vector Mask-Register Logical Instructions
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtV] in {
+  defm "" : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
+  defm "" : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
+  defm "" : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">;
+  defm "" : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
+  defm "" : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
+  defm "" : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
+  defm "" : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">;
+  defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
+} // Predicates = [HasStdExtV]
+
 //===----------------------------------------------------------------------===//
 // 17. Vector Permutation Instructions
 //===----------------------------------------------------------------------===//
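
As a cross-check of the MTypeInfo table above: for vbool<n>_t, <n> = SEW/LMUL,
so with SEW fixed at 8 the LMUL must be 8/<n>. For example (matching the
vsetvli lines in the tests below):

  vbool64_t (nxv1i1):  LMUL = 8/64 = 1/8 -> V_MF8 -> e8,mf8
  vbool8_t  (nxv8i1):  LMUL = 8/8  = 1   -> V_M1  -> e8,m1
  vbool1_t  (nxv64i1): LMUL = 8/1  = 8   -> V_M8  -> e8,m8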

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
new file mode 100644
index 000000000000..58155699a954
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmand-rv32.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
new file mode 100644
index 000000000000..28710a511ecb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmand-rv64.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
new file mode 100644
index 000000000000..efc39d222235
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmandnot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmandnot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmandnot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmandnot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmandnot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmandnot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmandnot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
new file mode 100644
index 000000000000..51061af58af6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmandnot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmandnot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmandnot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmandnot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmandnot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmandnot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmandnot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmandnot_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmandnot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
new file mode 100644
index 000000000000..5e5d1f613ce7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv32.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmnand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmnand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmnand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmnand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmnand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmnand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmnand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
new file mode 100644
index 000000000000..aa4c2b3e927a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnand-rv64.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmnand_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmnand_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmnand_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmnand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmnand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmnand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmnand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnand_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmnand.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
new file mode 100644
index 000000000000..ae9ff38377ac
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv32.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
new file mode 100644
index 000000000000..454599c19913
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmnor-rv64.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmnor_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
new file mode 100644
index 000000000000..09db9ddb2b69
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmor-rv32.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
new file mode 100644
index 000000000000..ee9cd0ba85ce
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmor-rv64.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmor_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
new file mode 100644
index 000000000000..175bf26051af
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
new file mode 100644
index 000000000000..96cf32b3640d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmornot_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmornot.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
new file mode 100644
index 000000000000..878def5f4362
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv32.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmxnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmxnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmxnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmxnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmxnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmxnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmxnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
new file mode 100644
index 000000000000..073509fc5c94
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor-rv64.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmxnor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmxnor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmxnor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmxnor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmxnor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmxnor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmxnor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxnor_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
new file mode 100644
index 000000000000..743aff55e3eb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
new file mode 100644
index 000000000000..fc9d15a88f47
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmxor-rv64.ll
@@ -0,0 +1,127 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
+  <vscale x 64 x i1>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK:       vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    i64 %2)
+
+  ret <vscale x 64 x i1> %a
+}
