[llvm-branch-commits] [llvm] f3f9ce3 - [RISCV] Define vmclr.m/vmset.m intrinsics.

Zakk Chen via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Mon Dec 28 19:02:22 PST 2020


Author: Zakk Chen
Date: 2020-12-28T18:57:17-08:00
New Revision: f3f9ce3b7948b250bc532818ed76a64cea8b6fbe

URL: https://github.com/llvm/llvm-project/commit/f3f9ce3b7948b250bc532818ed76a64cea8b6fbe
DIFF: https://github.com/llvm/llvm-project/commit/f3f9ce3b7948b250bc532818ed76a64cea8b6fbe.diff

LOG: [RISCV] Define vmclr.m/vmset.m intrinsics.

Define vmclr.m/vmset.m intrinsics and lower to vmxor.mm/vmxnor.mm.

Ideally all RVV pseudo instructions could be implemented in a C header,
but since those two instructions don't take an input, codegen cannot
guarantee that the source register is the same as the destination.

We expand each pseudo-v-inst into the corresponding v-inst in the
RISCVExpandPseudoInsts pass.

Reviewed By: craig.topper, frasercrmck

Differential Revision: https://reviews.llvm.org/D93849

Added: 
    llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index be11b518416c..d72dc5a4dd59 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -404,6 +404,12 @@ let TargetPrefix = "riscv" in {
                     [LLVMMatchType<0>, LLVMMatchType<0>,
                      LLVMMatchType<0>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Output: (vector)
+  // Input: (vl)
+  class RISCVNullaryIntrinsic
+        : Intrinsic<[llvm_anyvector_ty],
+                    [llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
 
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
@@ -701,6 +707,8 @@ let TargetPrefix = "riscv" in {
   def int_riscv_vmnor: RISCVBinaryAAANoMask;
   def int_riscv_vmornot: RISCVBinaryAAANoMask;
   def int_riscv_vmxnor: RISCVBinaryAAANoMask;
+  def int_riscv_vmclr : RISCVNullaryIntrinsic;
+  def int_riscv_vmset : RISCVNullaryIntrinsic;
 
   defm vpopc : RISCVMaskUnarySOut;
   defm vfirst : RISCVMaskUnarySOut;
@@ -724,9 +732,8 @@ let TargetPrefix = "riscv" in {
                                        [IntrNoMem]>, RISCVVIntrinsic;
   // Output: (vector)
   // Input: (vl)
-  def int_riscv_vid : Intrinsic<[llvm_anyvector_ty],
-                                [llvm_anyint_ty],
-                                [IntrNoMem]>, RISCVVIntrinsic;
+  def int_riscv_vid : RISCVNullaryIntrinsic;
+
   // Output: (vector)
   // Input: (maskedoff, mask, vl)
   def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],

diff  --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 660ae915f7b8..5f50892ca886 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -60,6 +60,8 @@ class RISCVExpandPseudo : public MachineFunctionPass {
                               MachineBasicBlock::iterator MBBI,
                               MachineBasicBlock::iterator &NextMBBI);
   bool expandVSetVL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+  bool expandVMSET_VMCLR(MachineBasicBlock &MBB,
+                         MachineBasicBlock::iterator MBBI, unsigned Opcode);
 };
 
 char RISCVExpandPseudo::ID = 0;
@@ -102,6 +104,24 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
     return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
   case RISCV::PseudoVSETVLI:
     return expandVSetVL(MBB, MBBI);
+  case RISCV::PseudoVMCLR_M_B1:
+  case RISCV::PseudoVMCLR_M_B2:
+  case RISCV::PseudoVMCLR_M_B4:
+  case RISCV::PseudoVMCLR_M_B8:
+  case RISCV::PseudoVMCLR_M_B16:
+  case RISCV::PseudoVMCLR_M_B32:
+  case RISCV::PseudoVMCLR_M_B64:
+    // vmclr.m vd => vmxor.mm vd, vd, vd
+    return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXOR_MM);
+  case RISCV::PseudoVMSET_M_B1:
+  case RISCV::PseudoVMSET_M_B2:
+  case RISCV::PseudoVMSET_M_B4:
+  case RISCV::PseudoVMSET_M_B8:
+  case RISCV::PseudoVMSET_M_B16:
+  case RISCV::PseudoVMSET_M_B32:
+  case RISCV::PseudoVMSET_M_B64:
+    // vmset.m vd => vmxnor.mm vd, vd, vd
+    return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXNOR_MM);
   }
 
   return false;
@@ -213,6 +233,19 @@ bool RISCVExpandPseudo::expandVSetVL(MachineBasicBlock &MBB,
   return true;
 }
 
+bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB,
+                                          MachineBasicBlock::iterator MBBI,
+                                          unsigned Opcode) {
+  DebugLoc DL = MBBI->getDebugLoc();
+  Register DstReg = MBBI->getOperand(0).getReg();
+  const MCInstrDesc &Desc = TII->get(Opcode);
+  BuildMI(MBB, MBBI, DL, Desc, DstReg)
+      .addReg(DstReg, RegState::Undef)
+      .addReg(DstReg, RegState::Undef);
+  MBBI->eraseFromParent(); // The pseudo instruction is gone now.
+  return true;
+}
+
 } // end of anonymous namespace
 
 INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 72e41daa0c3a..0068b5af2a0e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -538,6 +538,23 @@ class VPseudoNullaryMask<VReg RegClass>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+// Nullary for pseudo instructions. They are expanded in
+// RISCVExpandPseudoInsts pass.
+class VPseudoNullaryPseudoM<string BaseInst>
+       : Pseudo<(outs VR:$rd), (ins GPR:$vl, ixlenimm:$sew), []>,
+       RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 1;
+  let SEWIndex = 2;
+  // BaseInstr is not used in RISCVExpandPseudoInsts pass.
+  // Just fill a corresponding real v-inst to pass tablegen check.
+  let BaseInstr = !cast<Instruction>(BaseInst);
+}
+
 // RetClass could be GPR or VReg.
 class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
         Pseudo<(outs RetClass:$rd),
@@ -821,6 +838,14 @@ multiclass VPseudoMaskNullaryV {
   }
 }
 
+multiclass VPseudoNullaryPseudoM <string BaseInst> {
+  foreach mti = AllMasks in {
+    let VLMul = mti.LMul.value in {
+      def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">;
+    }
+  }
+}
+
 multiclass VPseudoUnaryV_M {
   defvar constraint = "@earlyclobber $rd";
   foreach m = MxList.m in {
@@ -1464,6 +1489,15 @@ multiclass VPatNullaryV<string intrinsic, string instruction>
   }
 }
 
+multiclass VPatNullaryM<string intrinsic, string inst> {
+  foreach mti = AllMasks in
+    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
+                        (XLenVT GPR:$vl))),
+                        (!cast<Instruction>(inst#"_M_"#mti.BX)
+                        (NoX0 GPR:$vl), mti.SEW)>;
+}
+
+
 multiclass VPatBinary<string intrinsic,
                       string inst,
                       string kind,
@@ -2384,6 +2418,10 @@ defm PseudoVMNOR: VPseudoBinaryM_MM;
 defm PseudoVMORNOT: VPseudoBinaryM_MM;
 defm PseudoVMXNOR: VPseudoBinaryM_MM;
 
+// Pseudo insturctions
+defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
+defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
+
 //===----------------------------------------------------------------------===//
 // 16.2. Vector mask population count vpopc
 //===----------------------------------------------------------------------===//
@@ -2913,6 +2951,10 @@ defm "" : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
 defm "" : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">;
 defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
 
+// pseudo instructions
+defm "" : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
+defm "" : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;
+
 //===----------------------------------------------------------------------===//
 // 16.2. Vector mask population count vpopc
 //===----------------------------------------------------------------------===//

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll
new file mode 100644
index 000000000000..9a36fd61fe0c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
+    i32 %0)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
+    i32 %0)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
+    i32 %0)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
+    i32 %0)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
+    i32 %0)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
+    i32 %0)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
+    i32 %0)
+
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll
new file mode 100644
index 000000000000..91c2af48b6a2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
+    i64 %0)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
+    i64 %0)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
+    i64 %0)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
+    i64 %0)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
+    i64 %0)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
+    i64 %0)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8
+; CHECK:       vmclr.m {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
+    i64 %0)
+
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll
new file mode 100644
index 000000000000..967d316cec71
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
+    i32 %0)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
+    i32 %0)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
+    i32 %0)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
+    i32 %0)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
+    i32 %0)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
+    i32 %0)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
+    i32 %0)
+
+  ret <vscale x 64 x i1> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll
new file mode 100644
index 000000000000..b2a39da8161b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
+    i64 %0)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
+    i64 %0)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
+    i64 %0)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
+    i64 %0)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
+    i64 %0)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m4
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
+    i64 %0)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m8
+; CHECK:       vmset.m {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
+    i64 %0)
+
+  ret <vscale x 64 x i1> %a
+}


        


More information about the llvm-branch-commits mailing list