[llvm] [RISCV][ISel] Add ISel support for experimental Zimop extension (PR #77089)

Lyut Nersisyan via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 18 01:00:36 PST 2024


https://github.com/ln8-8 updated https://github.com/llvm/llvm-project/pull/77089

>From caab6046a26517946bdce7659e4236ae9d5154fe Mon Sep 17 00:00:00 2001
From: ln8-8 <lyut.nersisyan at gmail.com>
Date: Fri, 5 Jan 2024 16:08:53 +0400
Subject: [PATCH 1/4] [RISCV][ISel] Add ISel support for experimental Zimop
 extension

This implements ISel support for mopr[0-31] and moprr[0-7] instructions for 32- and 64-bit targets
---
 llvm/include/llvm/IR/IntrinsicsRISCV.td       |  23 +++
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 171 ++++++++++++++++++
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |   7 +
 llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td  |  28 +++
 .../test/CodeGen/RISCV/rv32zimop-intrinsic.ll |  48 +++++
 .../test/CodeGen/RISCV/rv64zimop-intrinsic.ll |  97 ++++++++++
 6 files changed, 374 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index a391bc53cdb0e9..8ddda2a13e5c3b 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -108,6 +108,29 @@ let TargetPrefix = "riscv" in {
   def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
 } // TargetPrefix = "riscv"
 
+//===----------------------------------------------------------------------===//
+// May-Be-Operations
+
+let TargetPrefix = "riscv" in {
+
+  class MOPGPRIntrinsics
+      : DefaultAttrsIntrinsic<[llvm_any_ty],
+                              [LLVMMatchType<0>],
+                              [IntrNoMem, IntrSpeculatable]>;
+  class MOPGPRGPRIntrinsics
+      : DefaultAttrsIntrinsic<[llvm_any_ty],
+                              [LLVMMatchType<0>, LLVMMatchType<0>],
+                              [IntrNoMem, IntrSpeculatable]>;
+
+  // Zimop
+   foreach i = 0...31 in {
+    def int_riscv_mopr#i : MOPGPRIntrinsics;
+   }
+  foreach i = 0...7 in {
+    def int_riscv_moprr#i : MOPGPRGPRIntrinsics;
+  }
+} // TargetPrefix = "riscv"
+
 //===----------------------------------------------------------------------===//
 // Vectors
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index bc4b2b022c0ae9..f8c10fcd139f82 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8404,6 +8404,73 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
         IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
   }
+#define RISCV_MOPR_64_CASE(NAME, OPCODE)                                       \
+  case Intrinsic::riscv_##NAME: {                                              \
+    if (RV64LegalI32 && Subtarget.is64Bit() &&                                 \
+        Op.getValueType() == MVT::i32) {                                       \
+      SDValue NewOp =                                                          \
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));        \
+      SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp);                  \
+      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);                    \
+    }                                                                          \
+    return DAG.getNode(OPCODE, DL, XLenVT, Op.getOperand(1));                  \
+  }
+    RISCV_MOPR_64_CASE(mopr0, RISCVISD::MOPR0)
+    RISCV_MOPR_64_CASE(mopr1, RISCVISD::MOPR1)
+    RISCV_MOPR_64_CASE(mopr2, RISCVISD::MOPR2)
+    RISCV_MOPR_64_CASE(mopr3, RISCVISD::MOPR3)
+    RISCV_MOPR_64_CASE(mopr4, RISCVISD::MOPR4)
+    RISCV_MOPR_64_CASE(mopr5, RISCVISD::MOPR5)
+    RISCV_MOPR_64_CASE(mopr6, RISCVISD::MOPR6)
+    RISCV_MOPR_64_CASE(mopr7, RISCVISD::MOPR7)
+    RISCV_MOPR_64_CASE(mopr8, RISCVISD::MOPR8)
+    RISCV_MOPR_64_CASE(mopr9, RISCVISD::MOPR9)
+    RISCV_MOPR_64_CASE(mopr10, RISCVISD::MOPR10)
+    RISCV_MOPR_64_CASE(mopr11, RISCVISD::MOPR11)
+    RISCV_MOPR_64_CASE(mopr12, RISCVISD::MOPR12)
+    RISCV_MOPR_64_CASE(mopr13, RISCVISD::MOPR13)
+    RISCV_MOPR_64_CASE(mopr14, RISCVISD::MOPR14)
+    RISCV_MOPR_64_CASE(mopr15, RISCVISD::MOPR15)
+    RISCV_MOPR_64_CASE(mopr16, RISCVISD::MOPR16)
+    RISCV_MOPR_64_CASE(mopr17, RISCVISD::MOPR17)
+    RISCV_MOPR_64_CASE(mopr18, RISCVISD::MOPR18)
+    RISCV_MOPR_64_CASE(mopr19, RISCVISD::MOPR19)
+    RISCV_MOPR_64_CASE(mopr20, RISCVISD::MOPR20)
+    RISCV_MOPR_64_CASE(mopr21, RISCVISD::MOPR21)
+    RISCV_MOPR_64_CASE(mopr22, RISCVISD::MOPR22)
+    RISCV_MOPR_64_CASE(mopr23, RISCVISD::MOPR23)
+    RISCV_MOPR_64_CASE(mopr24, RISCVISD::MOPR24)
+    RISCV_MOPR_64_CASE(mopr25, RISCVISD::MOPR25)
+    RISCV_MOPR_64_CASE(mopr26, RISCVISD::MOPR26)
+    RISCV_MOPR_64_CASE(mopr27, RISCVISD::MOPR27)
+    RISCV_MOPR_64_CASE(mopr28, RISCVISD::MOPR28)
+    RISCV_MOPR_64_CASE(mopr29, RISCVISD::MOPR29)
+    RISCV_MOPR_64_CASE(mopr30, RISCVISD::MOPR30)
+    RISCV_MOPR_64_CASE(mopr31, RISCVISD::MOPR31)
+#undef RISCV_MOPR_64_CASE
+#define RISCV_MOPRR_64_CASE(NAME, OPCODE)                                      \
+  case Intrinsic::riscv_##NAME: {                                              \
+    if (RV64LegalI32 && Subtarget.is64Bit() &&                                 \
+        Op.getValueType() == MVT::i32) {                                       \
+      SDValue NewOp0 =                                                         \
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));        \
+      SDValue NewOp1 =                                                         \
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));        \
+      SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp0, NewOp1);         \
+      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);                    \
+    }                                                                          \
+    return DAG.getNode(OPCODE, DL, XLenVT, Op.getOperand(1),                   \
+                       Op.getOperand(2));                                      \
+  }
+    RISCV_MOPRR_64_CASE(moprr0, RISCVISD::MOPRR0)
+    RISCV_MOPRR_64_CASE(moprr1, RISCVISD::MOPRR1)
+    RISCV_MOPRR_64_CASE(moprr2, RISCVISD::MOPRR2)
+    RISCV_MOPRR_64_CASE(moprr3, RISCVISD::MOPRR3)
+    RISCV_MOPRR_64_CASE(moprr4, RISCVISD::MOPRR4)
+    RISCV_MOPRR_64_CASE(moprr5, RISCVISD::MOPRR5)
+    RISCV_MOPRR_64_CASE(moprr6, RISCVISD::MOPRR6)
+    RISCV_MOPRR_64_CASE(moprr7, RISCVISD::MOPRR7)
+#undef RISCV_MOPRR_64_CASE
   case Intrinsic::riscv_clmul:
     if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
       SDValue NewOp0 =
@@ -11794,6 +11861,70 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
       return;
     }
+#define RISCV_MOPR_CASE(NAME, OPCODE)                                          \
+  case Intrinsic::riscv_##NAME: {                                              \
+    if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)                \
+      return;                                                                  \
+    SDValue NewOp =                                                            \
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));          \
+    SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp);                    \
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));          \
+    return;                                                                    \
+  }
+      RISCV_MOPR_CASE(mopr0, RISCVISD::MOPR0)
+      RISCV_MOPR_CASE(mopr1, RISCVISD::MOPR1)
+      RISCV_MOPR_CASE(mopr2, RISCVISD::MOPR2)
+      RISCV_MOPR_CASE(mopr3, RISCVISD::MOPR3)
+      RISCV_MOPR_CASE(mopr4, RISCVISD::MOPR4)
+      RISCV_MOPR_CASE(mopr5, RISCVISD::MOPR5)
+      RISCV_MOPR_CASE(mopr6, RISCVISD::MOPR6)
+      RISCV_MOPR_CASE(mopr7, RISCVISD::MOPR7)
+      RISCV_MOPR_CASE(mopr8, RISCVISD::MOPR8)
+      RISCV_MOPR_CASE(mopr9, RISCVISD::MOPR9)
+      RISCV_MOPR_CASE(mopr10, RISCVISD::MOPR10)
+      RISCV_MOPR_CASE(mopr11, RISCVISD::MOPR11)
+      RISCV_MOPR_CASE(mopr12, RISCVISD::MOPR12)
+      RISCV_MOPR_CASE(mopr13, RISCVISD::MOPR13)
+      RISCV_MOPR_CASE(mopr14, RISCVISD::MOPR14)
+      RISCV_MOPR_CASE(mopr15, RISCVISD::MOPR15)
+      RISCV_MOPR_CASE(mopr16, RISCVISD::MOPR16)
+      RISCV_MOPR_CASE(mopr17, RISCVISD::MOPR17)
+      RISCV_MOPR_CASE(mopr18, RISCVISD::MOPR18)
+      RISCV_MOPR_CASE(mopr19, RISCVISD::MOPR19)
+      RISCV_MOPR_CASE(mopr20, RISCVISD::MOPR20)
+      RISCV_MOPR_CASE(mopr21, RISCVISD::MOPR21)
+      RISCV_MOPR_CASE(mopr22, RISCVISD::MOPR22)
+      RISCV_MOPR_CASE(mopr23, RISCVISD::MOPR23)
+      RISCV_MOPR_CASE(mopr24, RISCVISD::MOPR24)
+      RISCV_MOPR_CASE(mopr25, RISCVISD::MOPR25)
+      RISCV_MOPR_CASE(mopr26, RISCVISD::MOPR26)
+      RISCV_MOPR_CASE(mopr27, RISCVISD::MOPR27)
+      RISCV_MOPR_CASE(mopr28, RISCVISD::MOPR28)
+      RISCV_MOPR_CASE(mopr29, RISCVISD::MOPR29)
+      RISCV_MOPR_CASE(mopr30, RISCVISD::MOPR30)
+      RISCV_MOPR_CASE(mopr31, RISCVISD::MOPR31)
+#undef RISCV_MOPR_CASE
+#define RISCV_MOPRR_CASE(NAME, OPCODE)                                         \
+  case Intrinsic::riscv_##NAME: {                                              \
+    if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)                \
+      return;                                                                  \
+    SDValue NewOp0 =                                                           \
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));          \
+    SDValue NewOp1 =                                                           \
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));          \
+    SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp0, NewOp1);           \
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));          \
+    return;                                                                    \
+  }
+      RISCV_MOPRR_CASE(moprr0, RISCVISD::MOPRR0)
+      RISCV_MOPRR_CASE(moprr1, RISCVISD::MOPRR1)
+      RISCV_MOPRR_CASE(moprr2, RISCVISD::MOPRR2)
+      RISCV_MOPRR_CASE(moprr3, RISCVISD::MOPRR3)
+      RISCV_MOPRR_CASE(moprr4, RISCVISD::MOPRR4)
+      RISCV_MOPRR_CASE(moprr5, RISCVISD::MOPRR5)
+      RISCV_MOPRR_CASE(moprr6, RISCVISD::MOPRR6)
+      RISCV_MOPRR_CASE(moprr7, RISCVISD::MOPRR7)
+#undef RISCV_MOPRR_CASE
     case Intrinsic::riscv_clmul: {
       if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
         return;
@@ -18549,6 +18680,46 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(CLMUL)
   NODE_NAME_CASE(CLMULH)
   NODE_NAME_CASE(CLMULR)
+  NODE_NAME_CASE(MOPR0)
+  NODE_NAME_CASE(MOPR1)
+  NODE_NAME_CASE(MOPR2)
+  NODE_NAME_CASE(MOPR3)
+  NODE_NAME_CASE(MOPR4)
+  NODE_NAME_CASE(MOPR5)
+  NODE_NAME_CASE(MOPR6)
+  NODE_NAME_CASE(MOPR7)
+  NODE_NAME_CASE(MOPR8)
+  NODE_NAME_CASE(MOPR9)
+  NODE_NAME_CASE(MOPR10)
+  NODE_NAME_CASE(MOPR11)
+  NODE_NAME_CASE(MOPR12)
+  NODE_NAME_CASE(MOPR13)
+  NODE_NAME_CASE(MOPR14)
+  NODE_NAME_CASE(MOPR15)
+  NODE_NAME_CASE(MOPR16)
+  NODE_NAME_CASE(MOPR17)
+  NODE_NAME_CASE(MOPR18)
+  NODE_NAME_CASE(MOPR19)
+  NODE_NAME_CASE(MOPR20)
+  NODE_NAME_CASE(MOPR21)
+  NODE_NAME_CASE(MOPR22)
+  NODE_NAME_CASE(MOPR23)
+  NODE_NAME_CASE(MOPR24)
+  NODE_NAME_CASE(MOPR25)
+  NODE_NAME_CASE(MOPR26)
+  NODE_NAME_CASE(MOPR27)
+  NODE_NAME_CASE(MOPR28)
+  NODE_NAME_CASE(MOPR29)
+  NODE_NAME_CASE(MOPR30)
+  NODE_NAME_CASE(MOPR31)
+  NODE_NAME_CASE(MOPRR0)
+  NODE_NAME_CASE(MOPRR1)
+  NODE_NAME_CASE(MOPRR2)
+  NODE_NAME_CASE(MOPRR3)
+  NODE_NAME_CASE(MOPRR4)
+  NODE_NAME_CASE(MOPRR5)
+  NODE_NAME_CASE(MOPRR6)
+  NODE_NAME_CASE(MOPRR7)
   NODE_NAME_CASE(SHA256SIG0)
   NODE_NAME_CASE(SHA256SIG1)
   NODE_NAME_CASE(SHA256SUM0)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 18f58057558166..4fa8e50d141b51 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -142,6 +142,13 @@ enum NodeType : unsigned {
   SM4KS, SM4ED,
   SM3P0, SM3P1,
 
+  // May-Be-Operations
+  MOPR0, MOPR1, MOPR2, MOPR3, MOPR4, MOPR5, MOPR6, MOPR7, MOPR8, MOPR9, MOPR10,
+  MOPR11, MOPR12, MOPR13, MOPR14, MOPR15, MOPR16, MOPR17, MOPR18, MOPR19,
+  MOPR20, MOPR21, MOPR22, MOPR23, MOPR24, MOPR25, MOPR26, MOPR27, MOPR28,
+  MOPR29, MOPR30, MOPR31, MOPRR0, MOPRR1, MOPRR2, MOPRR3, MOPRR4, MOPRR5,
+  MOPRR6, MOPRR7,
+
   // Vector Extension
   FIRST_VL_VECTOR_OP,
   // VMV_V_V_VL matches the semantics of vmv.v.v but includes an extra operand
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td
index 1e8c70046c6347..8f60b1badcb4af 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td
@@ -34,6 +34,15 @@ class RVInstRMoprr<bits<4> imm4, bits<3> imm3, bits<3> funct3, RISCVOpcode opcod
   let Inst{25} = imm4{0};
 }
 
+foreach i = 0...31 in {
+  defvar riscvisd_moprx = "RISCVISD::MOPR"#i;
+  def riscv_mopr#i : SDNode<riscvisd_moprx,  SDTIntUnaryOp>;
+}
+foreach i = 0...7 in {
+  defvar riscvisd_moprrx = "RISCVISD::MOPRR"#i;
+  def riscv_moprr#i : SDNode<riscvisd_moprrx,  SDTIntBinOp>;
+}
+
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 class RVMopr<bits<7> imm7, bits<5> imm5, bits<3> funct3,
              RISCVOpcode opcode, string opcodestr>
@@ -57,3 +66,22 @@ foreach i = 0...7 in {
   def MOPRR#i : RVMoprr<0b1001, i, 0b100, OPC_SYSTEM, "mop.rr."#i>,
                 Sched<[]>;
 }
+
+// Zimop instructions
+foreach i = 0...31 in {
+    defvar moprx = !cast<Instruction>("MOPR"#i);
+    defvar riscv_moprx = !cast<SDNode>("riscv_mopr"#i);
+    let Predicates = [HasStdExtZimop] in {
+    def : Pat<(XLenVT (riscv_moprx (XLenVT GPR:$rs1))),
+              (moprx GPR:$rs1)>;
+    } // Predicates = [HasStdExtZimop]
+}
+
+foreach i = 0...7 in {
+    defvar moprrx = !cast<Instruction>("MOPRR"#i);
+    defvar riscv_moprrx = !cast<SDNode>("riscv_moprr"#i);
+    let Predicates = [HasStdExtZimop] in {
+    def : Pat<(XLenVT (riscv_moprrx (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
+              (moprrx GPR:$rs1, GPR:$rs2)>;
+    } // Predicates = [HasStdExtZimop]
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
new file mode 100644
index 00000000000000..bd1e369fc747a4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zimop -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZIMOP
+
+declare i32 @llvm.riscv.mopr0.i32(i32 %a)
+
+define i32 @mopr0_32(i32 %a) nounwind {
+; RV32ZIMOP-LABEL: mopr0_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.r.0 a0, a0
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr0.i32(i32 %a)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.mopr31.i32(i32 %a)
+
+define i32 @mopr31_32(i32 %a) nounwind {
+; RV32ZIMOP-LABEL: mopr31_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.r.31 a0, a0
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr31.i32(i32 %a)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+
+define i32 @moprr0_32(i32 %a, i32 %b) nounwind {
+; RV32ZIMOP-LABEL: moprr0_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+
+define i32 @moprr7_32(i32 %a, i32 %b) nounwind {
+; RV32ZIMOP-LABEL: moprr7_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
new file mode 100644
index 00000000000000..209aad89cbc29e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zimop -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZIMOP
+
+declare i64 @llvm.riscv.mopr0.i64(i64 %a)
+
+define i64 @mopr0_64(i64 %a) nounwind {
+; RV64ZIMOP-LABEL: mopr0_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.0 a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.mopr0.i64(i64 %a)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.mopr31.i64(i64 %a)
+
+define i64 @mopr31_64(i64 %a) nounwind {
+; RV64ZIMOP-LABEL: mopr31_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.31 a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.mopr31.i64(i64 %a)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.moprr0.i64(i64 %a, i64 %b)
+
+define i64 @moprr0_64(i64 %a, i64 %b) nounwind {
+; RV64ZIMOP-LABEL: moprr0_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.moprr0.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.moprr7.i64(i64 %a, i64 %b)
+
+define i64 @moprr7_64(i64 %a, i64 %b) nounwind {
+; RV64ZIMOP-LABEL: moprr7_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.moprr7.i64(i64 %a, i64 %b)
+  ret i64 %tmp
+}
+
+
+declare i32 @llvm.riscv.mopr0.i32(i32 %a)
+
+define signext i32 @mopr0_32(i32 signext %a) nounwind {
+; RV64ZIMOP-LABEL: mopr0_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.0 a0, a0
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr0.i32(i32 %a)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.mopr31.i32(i32 %a)
+
+define signext i32 @mopr31_32(i32 signext %a) nounwind {
+; RV64ZIMOP-LABEL: mopr31_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.31 a0, a0
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr31.i32(i32 %a)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+
+define signext i32 @moprr0_32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZIMOP-LABEL: moprr0_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+
+define signext i32 @moprr7_32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZIMOP-LABEL: moprr7_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+  ret i32 %tmp
+}
+

>From af79891fe5d67a52b77cc62cdb94218de449edc2 Mon Sep 17 00:00:00 2001
From: ln8-8 <lyut.nersisyan at gmail.com>
Date: Mon, 8 Jan 2024 18:31:15 +0400
Subject: [PATCH 2/4] Started to have one intrinsic each for mop.r and mop.rr

---
 llvm/include/llvm/IR/IntrinsicsRISCV.td       |  13 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 197 ++++++++++--------
 .../test/CodeGen/RISCV/rv32zimop-intrinsic.ll |  16 +-
 .../test/CodeGen/RISCV/rv64zimop-intrinsic.ll |  33 ++-
 4 files changed, 132 insertions(+), 127 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 8ddda2a13e5c3b..9a5b6563cfc5e2 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -115,20 +115,17 @@ let TargetPrefix = "riscv" in {
 
   class MOPGPRIntrinsics
       : DefaultAttrsIntrinsic<[llvm_any_ty],
-                              [LLVMMatchType<0>],
+                              [LLVMMatchType<0>, LLVMMatchType<0>],
                               [IntrNoMem, IntrSpeculatable]>;
   class MOPGPRGPRIntrinsics
       : DefaultAttrsIntrinsic<[llvm_any_ty],
-                              [LLVMMatchType<0>, LLVMMatchType<0>],
+                              [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                               [IntrNoMem, IntrSpeculatable]>;
 
   // Zimop
-   foreach i = 0...31 in {
-    def int_riscv_mopr#i : MOPGPRIntrinsics;
-   }
-  foreach i = 0...7 in {
-    def int_riscv_moprr#i : MOPGPRGPRIntrinsics;
-  }
+  def int_riscv_mopr : MOPGPRIntrinsics;
+  def int_riscv_moprr : MOPGPRGPRIntrinsics;
+
 } // TargetPrefix = "riscv"
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f8c10fcd139f82..d1063b5a88e17a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8404,8 +8404,11 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
         IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
   }
-#define RISCV_MOPR_64_CASE(NAME, OPCODE)                                       \
-  case Intrinsic::riscv_##NAME: {                                              \
+  case Intrinsic::riscv_mopr: {
+    unsigned mopr_id = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+    switch (mopr_id) {
+#define RISCV_MOPR_64_CASE(MOPR_ID, OPCODE)                                    \
+  case MOPR_ID: {                                                              \
     if (RV64LegalI32 && Subtarget.is64Bit() &&                                 \
         Op.getValueType() == MVT::i32) {                                       \
       SDValue NewOp =                                                          \
@@ -8415,41 +8418,46 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     }                                                                          \
     return DAG.getNode(OPCODE, DL, XLenVT, Op.getOperand(1));                  \
   }
-    RISCV_MOPR_64_CASE(mopr0, RISCVISD::MOPR0)
-    RISCV_MOPR_64_CASE(mopr1, RISCVISD::MOPR1)
-    RISCV_MOPR_64_CASE(mopr2, RISCVISD::MOPR2)
-    RISCV_MOPR_64_CASE(mopr3, RISCVISD::MOPR3)
-    RISCV_MOPR_64_CASE(mopr4, RISCVISD::MOPR4)
-    RISCV_MOPR_64_CASE(mopr5, RISCVISD::MOPR5)
-    RISCV_MOPR_64_CASE(mopr6, RISCVISD::MOPR6)
-    RISCV_MOPR_64_CASE(mopr7, RISCVISD::MOPR7)
-    RISCV_MOPR_64_CASE(mopr8, RISCVISD::MOPR8)
-    RISCV_MOPR_64_CASE(mopr9, RISCVISD::MOPR9)
-    RISCV_MOPR_64_CASE(mopr10, RISCVISD::MOPR10)
-    RISCV_MOPR_64_CASE(mopr11, RISCVISD::MOPR11)
-    RISCV_MOPR_64_CASE(mopr12, RISCVISD::MOPR12)
-    RISCV_MOPR_64_CASE(mopr13, RISCVISD::MOPR13)
-    RISCV_MOPR_64_CASE(mopr14, RISCVISD::MOPR14)
-    RISCV_MOPR_64_CASE(mopr15, RISCVISD::MOPR15)
-    RISCV_MOPR_64_CASE(mopr16, RISCVISD::MOPR16)
-    RISCV_MOPR_64_CASE(mopr17, RISCVISD::MOPR17)
-    RISCV_MOPR_64_CASE(mopr18, RISCVISD::MOPR18)
-    RISCV_MOPR_64_CASE(mopr19, RISCVISD::MOPR19)
-    RISCV_MOPR_64_CASE(mopr20, RISCVISD::MOPR20)
-    RISCV_MOPR_64_CASE(mopr21, RISCVISD::MOPR21)
-    RISCV_MOPR_64_CASE(mopr22, RISCVISD::MOPR22)
-    RISCV_MOPR_64_CASE(mopr23, RISCVISD::MOPR23)
-    RISCV_MOPR_64_CASE(mopr24, RISCVISD::MOPR24)
-    RISCV_MOPR_64_CASE(mopr25, RISCVISD::MOPR25)
-    RISCV_MOPR_64_CASE(mopr26, RISCVISD::MOPR26)
-    RISCV_MOPR_64_CASE(mopr27, RISCVISD::MOPR27)
-    RISCV_MOPR_64_CASE(mopr28, RISCVISD::MOPR28)
-    RISCV_MOPR_64_CASE(mopr29, RISCVISD::MOPR29)
-    RISCV_MOPR_64_CASE(mopr30, RISCVISD::MOPR30)
-    RISCV_MOPR_64_CASE(mopr31, RISCVISD::MOPR31)
+      RISCV_MOPR_64_CASE(0, RISCVISD::MOPR0)
+      RISCV_MOPR_64_CASE(1, RISCVISD::MOPR1)
+      RISCV_MOPR_64_CASE(2, RISCVISD::MOPR2)
+      RISCV_MOPR_64_CASE(3, RISCVISD::MOPR3)
+      RISCV_MOPR_64_CASE(4, RISCVISD::MOPR4)
+      RISCV_MOPR_64_CASE(5, RISCVISD::MOPR5)
+      RISCV_MOPR_64_CASE(6, RISCVISD::MOPR6)
+      RISCV_MOPR_64_CASE(7, RISCVISD::MOPR7)
+      RISCV_MOPR_64_CASE(8, RISCVISD::MOPR8)
+      RISCV_MOPR_64_CASE(9, RISCVISD::MOPR9)
+      RISCV_MOPR_64_CASE(10, RISCVISD::MOPR10)
+      RISCV_MOPR_64_CASE(11, RISCVISD::MOPR11)
+      RISCV_MOPR_64_CASE(12, RISCVISD::MOPR12)
+      RISCV_MOPR_64_CASE(13, RISCVISD::MOPR13)
+      RISCV_MOPR_64_CASE(14, RISCVISD::MOPR14)
+      RISCV_MOPR_64_CASE(15, RISCVISD::MOPR15)
+      RISCV_MOPR_64_CASE(16, RISCVISD::MOPR16)
+      RISCV_MOPR_64_CASE(17, RISCVISD::MOPR17)
+      RISCV_MOPR_64_CASE(18, RISCVISD::MOPR18)
+      RISCV_MOPR_64_CASE(19, RISCVISD::MOPR19)
+      RISCV_MOPR_64_CASE(20, RISCVISD::MOPR20)
+      RISCV_MOPR_64_CASE(21, RISCVISD::MOPR21)
+      RISCV_MOPR_64_CASE(22, RISCVISD::MOPR22)
+      RISCV_MOPR_64_CASE(23, RISCVISD::MOPR23)
+      RISCV_MOPR_64_CASE(24, RISCVISD::MOPR24)
+      RISCV_MOPR_64_CASE(25, RISCVISD::MOPR25)
+      RISCV_MOPR_64_CASE(26, RISCVISD::MOPR26)
+      RISCV_MOPR_64_CASE(27, RISCVISD::MOPR27)
+      RISCV_MOPR_64_CASE(28, RISCVISD::MOPR28)
+      RISCV_MOPR_64_CASE(29, RISCVISD::MOPR29)
+      RISCV_MOPR_64_CASE(30, RISCVISD::MOPR30)
+      RISCV_MOPR_64_CASE(31, RISCVISD::MOPR31)
 #undef RISCV_MOPR_64_CASE
-#define RISCV_MOPRR_64_CASE(NAME, OPCODE)                                      \
-  case Intrinsic::riscv_##NAME: {                                              \
+    }
+  }
+  case Intrinsic::riscv_moprr: {
+    unsigned moprr_id = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
+    switch (moprr_id) {
+#define RISCV_MOPRR_64_CASE(MOPRR_ID, OPCODE)                                  \
+  case MOPRR_ID: {                                                             \
     if (RV64LegalI32 && Subtarget.is64Bit() &&                                 \
         Op.getValueType() == MVT::i32) {                                       \
       SDValue NewOp0 =                                                         \
@@ -8462,15 +8470,17 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(OPCODE, DL, XLenVT, Op.getOperand(1),                   \
                        Op.getOperand(2));                                      \
   }
-    RISCV_MOPRR_64_CASE(moprr0, RISCVISD::MOPRR0)
-    RISCV_MOPRR_64_CASE(moprr1, RISCVISD::MOPRR1)
-    RISCV_MOPRR_64_CASE(moprr2, RISCVISD::MOPRR2)
-    RISCV_MOPRR_64_CASE(moprr3, RISCVISD::MOPRR3)
-    RISCV_MOPRR_64_CASE(moprr4, RISCVISD::MOPRR4)
-    RISCV_MOPRR_64_CASE(moprr5, RISCVISD::MOPRR5)
-    RISCV_MOPRR_64_CASE(moprr6, RISCVISD::MOPRR6)
-    RISCV_MOPRR_64_CASE(moprr7, RISCVISD::MOPRR7)
+      RISCV_MOPRR_64_CASE(0, RISCVISD::MOPRR0)
+      RISCV_MOPRR_64_CASE(1, RISCVISD::MOPRR1)
+      RISCV_MOPRR_64_CASE(2, RISCVISD::MOPRR2)
+      RISCV_MOPRR_64_CASE(3, RISCVISD::MOPRR3)
+      RISCV_MOPRR_64_CASE(4, RISCVISD::MOPRR4)
+      RISCV_MOPRR_64_CASE(5, RISCVISD::MOPRR5)
+      RISCV_MOPRR_64_CASE(6, RISCVISD::MOPRR6)
+      RISCV_MOPRR_64_CASE(7, RISCVISD::MOPRR7)
 #undef RISCV_MOPRR_64_CASE
+    }
+  }
   case Intrinsic::riscv_clmul:
     if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
       SDValue NewOp0 =
@@ -11861,8 +11871,11 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
       return;
     }
-#define RISCV_MOPR_CASE(NAME, OPCODE)                                          \
-  case Intrinsic::riscv_##NAME: {                                              \
+    case Intrinsic::riscv_mopr: {
+      unsigned mopr_id = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+      switch (mopr_id) {
+#define RISCV_MOPR_CASE(MOPR_ID, OPCODE)                                       \
+  case MOPR_ID: {                                                              \
     if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)                \
       return;                                                                  \
     SDValue NewOp =                                                            \
@@ -11871,41 +11884,47 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));          \
     return;                                                                    \
   }
-      RISCV_MOPR_CASE(mopr0, RISCVISD::MOPR0)
-      RISCV_MOPR_CASE(mopr1, RISCVISD::MOPR1)
-      RISCV_MOPR_CASE(mopr2, RISCVISD::MOPR2)
-      RISCV_MOPR_CASE(mopr3, RISCVISD::MOPR3)
-      RISCV_MOPR_CASE(mopr4, RISCVISD::MOPR4)
-      RISCV_MOPR_CASE(mopr5, RISCVISD::MOPR5)
-      RISCV_MOPR_CASE(mopr6, RISCVISD::MOPR6)
-      RISCV_MOPR_CASE(mopr7, RISCVISD::MOPR7)
-      RISCV_MOPR_CASE(mopr8, RISCVISD::MOPR8)
-      RISCV_MOPR_CASE(mopr9, RISCVISD::MOPR9)
-      RISCV_MOPR_CASE(mopr10, RISCVISD::MOPR10)
-      RISCV_MOPR_CASE(mopr11, RISCVISD::MOPR11)
-      RISCV_MOPR_CASE(mopr12, RISCVISD::MOPR12)
-      RISCV_MOPR_CASE(mopr13, RISCVISD::MOPR13)
-      RISCV_MOPR_CASE(mopr14, RISCVISD::MOPR14)
-      RISCV_MOPR_CASE(mopr15, RISCVISD::MOPR15)
-      RISCV_MOPR_CASE(mopr16, RISCVISD::MOPR16)
-      RISCV_MOPR_CASE(mopr17, RISCVISD::MOPR17)
-      RISCV_MOPR_CASE(mopr18, RISCVISD::MOPR18)
-      RISCV_MOPR_CASE(mopr19, RISCVISD::MOPR19)
-      RISCV_MOPR_CASE(mopr20, RISCVISD::MOPR20)
-      RISCV_MOPR_CASE(mopr21, RISCVISD::MOPR21)
-      RISCV_MOPR_CASE(mopr22, RISCVISD::MOPR22)
-      RISCV_MOPR_CASE(mopr23, RISCVISD::MOPR23)
-      RISCV_MOPR_CASE(mopr24, RISCVISD::MOPR24)
-      RISCV_MOPR_CASE(mopr25, RISCVISD::MOPR25)
-      RISCV_MOPR_CASE(mopr26, RISCVISD::MOPR26)
-      RISCV_MOPR_CASE(mopr27, RISCVISD::MOPR27)
-      RISCV_MOPR_CASE(mopr28, RISCVISD::MOPR28)
-      RISCV_MOPR_CASE(mopr29, RISCVISD::MOPR29)
-      RISCV_MOPR_CASE(mopr30, RISCVISD::MOPR30)
-      RISCV_MOPR_CASE(mopr31, RISCVISD::MOPR31)
+        RISCV_MOPR_CASE(0, RISCVISD::MOPR0)
+        RISCV_MOPR_CASE(1, RISCVISD::MOPR1)
+        RISCV_MOPR_CASE(2, RISCVISD::MOPR2)
+        RISCV_MOPR_CASE(3, RISCVISD::MOPR3)
+        RISCV_MOPR_CASE(4, RISCVISD::MOPR4)
+        RISCV_MOPR_CASE(5, RISCVISD::MOPR5)
+        RISCV_MOPR_CASE(6, RISCVISD::MOPR6)
+        RISCV_MOPR_CASE(7, RISCVISD::MOPR7)
+        RISCV_MOPR_CASE(8, RISCVISD::MOPR8)
+        RISCV_MOPR_CASE(9, RISCVISD::MOPR9)
+        RISCV_MOPR_CASE(10, RISCVISD::MOPR10)
+        RISCV_MOPR_CASE(11, RISCVISD::MOPR11)
+        RISCV_MOPR_CASE(12, RISCVISD::MOPR12)
+        RISCV_MOPR_CASE(13, RISCVISD::MOPR13)
+        RISCV_MOPR_CASE(14, RISCVISD::MOPR14)
+        RISCV_MOPR_CASE(15, RISCVISD::MOPR15)
+        RISCV_MOPR_CASE(16, RISCVISD::MOPR16)
+        RISCV_MOPR_CASE(17, RISCVISD::MOPR17)
+        RISCV_MOPR_CASE(18, RISCVISD::MOPR18)
+        RISCV_MOPR_CASE(19, RISCVISD::MOPR19)
+        RISCV_MOPR_CASE(20, RISCVISD::MOPR20)
+        RISCV_MOPR_CASE(21, RISCVISD::MOPR21)
+        RISCV_MOPR_CASE(22, RISCVISD::MOPR22)
+        RISCV_MOPR_CASE(23, RISCVISD::MOPR23)
+        RISCV_MOPR_CASE(24, RISCVISD::MOPR24)
+        RISCV_MOPR_CASE(25, RISCVISD::MOPR25)
+        RISCV_MOPR_CASE(26, RISCVISD::MOPR26)
+        RISCV_MOPR_CASE(27, RISCVISD::MOPR27)
+        RISCV_MOPR_CASE(28, RISCVISD::MOPR28)
+        RISCV_MOPR_CASE(29, RISCVISD::MOPR29)
+        RISCV_MOPR_CASE(30, RISCVISD::MOPR30)
+        RISCV_MOPR_CASE(31, RISCVISD::MOPR31)
 #undef RISCV_MOPR_CASE
-#define RISCV_MOPRR_CASE(NAME, OPCODE)                                         \
-  case Intrinsic::riscv_##NAME: {                                              \
+      }
+    }
+    case Intrinsic::riscv_moprr: {
+      unsigned moprr_id =
+          cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
+      switch (moprr_id) {
+#define RISCV_MOPRR_CASE(MOPRR_ID, OPCODE)                                     \
+  case MOPRR_ID: {                                                             \
     if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)                \
       return;                                                                  \
     SDValue NewOp0 =                                                           \
@@ -11916,15 +11935,17 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));          \
     return;                                                                    \
   }
-      RISCV_MOPRR_CASE(moprr0, RISCVISD::MOPRR0)
-      RISCV_MOPRR_CASE(moprr1, RISCVISD::MOPRR1)
-      RISCV_MOPRR_CASE(moprr2, RISCVISD::MOPRR2)
-      RISCV_MOPRR_CASE(moprr3, RISCVISD::MOPRR3)
-      RISCV_MOPRR_CASE(moprr4, RISCVISD::MOPRR4)
-      RISCV_MOPRR_CASE(moprr5, RISCVISD::MOPRR5)
-      RISCV_MOPRR_CASE(moprr6, RISCVISD::MOPRR6)
-      RISCV_MOPRR_CASE(moprr7, RISCVISD::MOPRR7)
+        RISCV_MOPRR_CASE(0, RISCVISD::MOPRR0)
+        RISCV_MOPRR_CASE(1, RISCVISD::MOPRR1)
+        RISCV_MOPRR_CASE(2, RISCVISD::MOPRR2)
+        RISCV_MOPRR_CASE(3, RISCVISD::MOPRR3)
+        RISCV_MOPRR_CASE(4, RISCVISD::MOPRR4)
+        RISCV_MOPRR_CASE(5, RISCVISD::MOPRR5)
+        RISCV_MOPRR_CASE(6, RISCVISD::MOPRR6)
+        RISCV_MOPRR_CASE(7, RISCVISD::MOPRR7)
 #undef RISCV_MOPRR_CASE
+      }
+    }
     case Intrinsic::riscv_clmul: {
       if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
         return;
diff --git a/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
index bd1e369fc747a4..e5f36086f1cfcd 100644
--- a/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
@@ -2,47 +2,43 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zimop -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32ZIMOP
 
-declare i32 @llvm.riscv.mopr0.i32(i32 %a)
+declare i32 @llvm.riscv.mopr.i32(i32 %a, i32 %b)
 
 define i32 @mopr0_32(i32 %a) nounwind {
 ; RV32ZIMOP-LABEL: mopr0_32:
 ; RV32ZIMOP:       # %bb.0:
 ; RV32ZIMOP-NEXT:    mop.r.0 a0, a0
 ; RV32ZIMOP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.mopr0.i32(i32 %a)
+  %tmp = call i32 @llvm.riscv.mopr.i32(i32 %a, i32 0)
   ret i32 %tmp
 }
 
-declare i32 @llvm.riscv.mopr31.i32(i32 %a)
-
 define i32 @mopr31_32(i32 %a) nounwind {
 ; RV32ZIMOP-LABEL: mopr31_32:
 ; RV32ZIMOP:       # %bb.0:
 ; RV32ZIMOP-NEXT:    mop.r.31 a0, a0
 ; RV32ZIMOP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.mopr31.i32(i32 %a)
+  %tmp = call i32 @llvm.riscv.mopr.i32(i32 %a, i32 31)
   ret i32 %tmp
 }
 
-declare i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+declare i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 %c)
 
 define i32 @moprr0_32(i32 %a, i32 %b) nounwind {
 ; RV32ZIMOP-LABEL: moprr0_32:
 ; RV32ZIMOP:       # %bb.0:
 ; RV32ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
 ; RV32ZIMOP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+  %tmp = call i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 0)
   ret i32 %tmp
 }
 
-declare i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
-
 define i32 @moprr7_32(i32 %a, i32 %b) nounwind {
 ; RV32ZIMOP-LABEL: moprr7_32:
 ; RV32ZIMOP:       # %bb.0:
 ; RV32ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
 ; RV32ZIMOP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+  %tmp = call i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 7)
   ret i32 %tmp
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
index 209aad89cbc29e..cd57739a955d5e 100644
--- a/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
@@ -2,52 +2,47 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zimop -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64ZIMOP
 
-declare i64 @llvm.riscv.mopr0.i64(i64 %a)
+declare i64 @llvm.riscv.mopr.i64(i64 %a, i64 %b)
 
 define i64 @mopr0_64(i64 %a) nounwind {
 ; RV64ZIMOP-LABEL: mopr0_64:
 ; RV64ZIMOP:       # %bb.0:
 ; RV64ZIMOP-NEXT:    mop.r.0 a0, a0
 ; RV64ZIMOP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.mopr0.i64(i64 %a)
+  %tmp = call i64 @llvm.riscv.mopr.i64(i64 %a, i64 0)
   ret i64 %tmp
 }
 
-declare i64 @llvm.riscv.mopr31.i64(i64 %a)
-
 define i64 @mopr31_64(i64 %a) nounwind {
 ; RV64ZIMOP-LABEL: mopr31_64:
 ; RV64ZIMOP:       # %bb.0:
 ; RV64ZIMOP-NEXT:    mop.r.31 a0, a0
 ; RV64ZIMOP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.mopr31.i64(i64 %a)
+  %tmp = call i64 @llvm.riscv.mopr.i64(i64 %a, i64 31)
   ret i64 %tmp
 }
 
-declare i64 @llvm.riscv.moprr0.i64(i64 %a, i64 %b)
+declare i64 @llvm.riscv.moprr.i64(i64 %a, i64 %b, i64 %c)
 
 define i64 @moprr0_64(i64 %a, i64 %b) nounwind {
 ; RV64ZIMOP-LABEL: moprr0_64:
 ; RV64ZIMOP:       # %bb.0:
 ; RV64ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
 ; RV64ZIMOP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.moprr0.i64(i64 %a, i64 %b)
+  %tmp = call i64 @llvm.riscv.moprr.i64(i64 %a, i64 %b, i64 0)
   ret i64 %tmp
 }
 
-declare i64 @llvm.riscv.moprr7.i64(i64 %a, i64 %b)
-
 define i64 @moprr7_64(i64 %a, i64 %b) nounwind {
 ; RV64ZIMOP-LABEL: moprr7_64:
 ; RV64ZIMOP:       # %bb.0:
 ; RV64ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
 ; RV64ZIMOP-NEXT:    ret
-  %tmp = call i64 @llvm.riscv.moprr7.i64(i64 %a, i64 %b)
+  %tmp = call i64 @llvm.riscv.moprr.i64(i64 %a, i64 %b, i64 7)
   ret i64 %tmp
 }
 
-
-declare i32 @llvm.riscv.mopr0.i32(i32 %a)
+declare i32 @llvm.riscv.mopr.i32(i32 %a, i32 %b)
 
 define signext i32 @mopr0_32(i32 signext %a) nounwind {
 ; RV64ZIMOP-LABEL: mopr0_32:
@@ -55,23 +50,21 @@ define signext i32 @mopr0_32(i32 signext %a) nounwind {
 ; RV64ZIMOP-NEXT:    mop.r.0 a0, a0
 ; RV64ZIMOP-NEXT:    sext.w a0, a0
 ; RV64ZIMOP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.mopr0.i32(i32 %a)
+  %tmp = call i32 @llvm.riscv.mopr.i32(i32 %a, i32 0)
   ret i32 %tmp
 }
 
-declare i32 @llvm.riscv.mopr31.i32(i32 %a)
-
 define signext i32 @mopr31_32(i32 signext %a) nounwind {
 ; RV64ZIMOP-LABEL: mopr31_32:
 ; RV64ZIMOP:       # %bb.0:
 ; RV64ZIMOP-NEXT:    mop.r.31 a0, a0
 ; RV64ZIMOP-NEXT:    sext.w a0, a0
 ; RV64ZIMOP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.mopr31.i32(i32 %a)
+  %tmp = call i32 @llvm.riscv.mopr.i32(i32 %a, i32 31)
   ret i32 %tmp
 }
 
-declare i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+declare i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 %c)
 
 define signext i32 @moprr0_32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64ZIMOP-LABEL: moprr0_32:
@@ -79,19 +72,17 @@ define signext i32 @moprr0_32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
 ; RV64ZIMOP-NEXT:    sext.w a0, a0
 ; RV64ZIMOP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.moprr0.i32(i32 %a, i32 %b)
+  %tmp = call i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 0)
   ret i32 %tmp
 }
 
-declare i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
-
 define signext i32 @moprr7_32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64ZIMOP-LABEL: moprr7_32:
 ; RV64ZIMOP:       # %bb.0:
 ; RV64ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
 ; RV64ZIMOP-NEXT:    sext.w a0, a0
 ; RV64ZIMOP-NEXT:    ret
-  %tmp = call i32 @llvm.riscv.moprr7.i32(i32 %a, i32 %b)
+  %tmp = call i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 7)
   ret i32 %tmp
 }
 

>From 1d53259eda974514caa992aa5b93162c49e991c4 Mon Sep 17 00:00:00 2001
From: ln8-8 <lyut.nersisyan at gmail.com>
Date: Mon, 15 Jan 2024 17:55:42 +0400
Subject: [PATCH 3/4] fixed intrinsic for constant arguments

---
 llvm/include/llvm/IR/IntrinsicsRISCV.td | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 9a5b6563cfc5e2..1a1f80fd5425cb 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -113,18 +113,15 @@ let TargetPrefix = "riscv" in {
 
 let TargetPrefix = "riscv" in {
 
-  class MOPGPRIntrinsics
+  // Zimop
+  def int_riscv_mopr
       : DefaultAttrsIntrinsic<[llvm_any_ty],
                               [LLVMMatchType<0>, LLVMMatchType<0>],
-                              [IntrNoMem, IntrSpeculatable]>;
-  class MOPGPRGPRIntrinsics
+                              [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
+  def int_riscv_moprr
       : DefaultAttrsIntrinsic<[llvm_any_ty],
                               [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
-                              [IntrNoMem, IntrSpeculatable]>;
-
-  // Zimop
-  def int_riscv_mopr : MOPGPRIntrinsics;
-  def int_riscv_moprr : MOPGPRGPRIntrinsics;
+                              [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;
 
 } // TargetPrefix = "riscv"
 

>From b6403573e2e91167b6d5a0875e31a14b88d5bfa9 Mon Sep 17 00:00:00 2001
From: ln8-8 <lyut.nersisyan at gmail.com>
Date: Thu, 18 Jan 2024 12:59:41 +0400
Subject: [PATCH 4/4] reduce number of ISD opcodes for MOPR/MOPRR

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp  | 225 ++++---------------
 llvm/lib/Target/RISCV/RISCVISelLowering.h    |   6 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td |  34 ++-
 3 files changed, 58 insertions(+), 207 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d1063b5a88e17a..7dc088a4a9a5e7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8405,81 +8405,31 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
   }
   case Intrinsic::riscv_mopr: {
-    unsigned mopr_id = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
-    switch (mopr_id) {
-#define RISCV_MOPR_64_CASE(MOPR_ID, OPCODE)                                    \
-  case MOPR_ID: {                                                              \
-    if (RV64LegalI32 && Subtarget.is64Bit() &&                                 \
-        Op.getValueType() == MVT::i32) {                                       \
-      SDValue NewOp =                                                          \
-          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));        \
-      SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp);                  \
-      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);                    \
-    }                                                                          \
-    return DAG.getNode(OPCODE, DL, XLenVT, Op.getOperand(1));                  \
-  }
-      RISCV_MOPR_64_CASE(0, RISCVISD::MOPR0)
-      RISCV_MOPR_64_CASE(1, RISCVISD::MOPR1)
-      RISCV_MOPR_64_CASE(2, RISCVISD::MOPR2)
-      RISCV_MOPR_64_CASE(3, RISCVISD::MOPR3)
-      RISCV_MOPR_64_CASE(4, RISCVISD::MOPR4)
-      RISCV_MOPR_64_CASE(5, RISCVISD::MOPR5)
-      RISCV_MOPR_64_CASE(6, RISCVISD::MOPR6)
-      RISCV_MOPR_64_CASE(7, RISCVISD::MOPR7)
-      RISCV_MOPR_64_CASE(8, RISCVISD::MOPR8)
-      RISCV_MOPR_64_CASE(9, RISCVISD::MOPR9)
-      RISCV_MOPR_64_CASE(10, RISCVISD::MOPR10)
-      RISCV_MOPR_64_CASE(11, RISCVISD::MOPR11)
-      RISCV_MOPR_64_CASE(12, RISCVISD::MOPR12)
-      RISCV_MOPR_64_CASE(13, RISCVISD::MOPR13)
-      RISCV_MOPR_64_CASE(14, RISCVISD::MOPR14)
-      RISCV_MOPR_64_CASE(15, RISCVISD::MOPR15)
-      RISCV_MOPR_64_CASE(16, RISCVISD::MOPR16)
-      RISCV_MOPR_64_CASE(17, RISCVISD::MOPR17)
-      RISCV_MOPR_64_CASE(18, RISCVISD::MOPR18)
-      RISCV_MOPR_64_CASE(19, RISCVISD::MOPR19)
-      RISCV_MOPR_64_CASE(20, RISCVISD::MOPR20)
-      RISCV_MOPR_64_CASE(21, RISCVISD::MOPR21)
-      RISCV_MOPR_64_CASE(22, RISCVISD::MOPR22)
-      RISCV_MOPR_64_CASE(23, RISCVISD::MOPR23)
-      RISCV_MOPR_64_CASE(24, RISCVISD::MOPR24)
-      RISCV_MOPR_64_CASE(25, RISCVISD::MOPR25)
-      RISCV_MOPR_64_CASE(26, RISCVISD::MOPR26)
-      RISCV_MOPR_64_CASE(27, RISCVISD::MOPR27)
-      RISCV_MOPR_64_CASE(28, RISCVISD::MOPR28)
-      RISCV_MOPR_64_CASE(29, RISCVISD::MOPR29)
-      RISCV_MOPR_64_CASE(30, RISCVISD::MOPR30)
-      RISCV_MOPR_64_CASE(31, RISCVISD::MOPR31)
-#undef RISCV_MOPR_64_CASE
+    if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
+      SDValue NewOp =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
+      SDValue Res = DAG.getNode(
+          RISCVISD::MOPR, DL, MVT::i64, NewOp,
+          DAG.getTargetConstant(Op.getConstantOperandVal(2), DL, MVT::i64));
+      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
     }
+    return DAG.getNode(RISCVISD::MOPR, DL, XLenVT, Op.getOperand(1),
+                       Op.getOperand(2));
   }
+
   case Intrinsic::riscv_moprr: {
-    unsigned moprr_id = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
-    switch (moprr_id) {
-#define RISCV_MOPRR_64_CASE(MOPRR_ID, OPCODE)                                  \
-  case MOPRR_ID: {                                                             \
-    if (RV64LegalI32 && Subtarget.is64Bit() &&                                 \
-        Op.getValueType() == MVT::i32) {                                       \
-      SDValue NewOp0 =                                                         \
-          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));        \
-      SDValue NewOp1 =                                                         \
-          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));        \
-      SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp0, NewOp1);         \
-      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);                    \
-    }                                                                          \
-    return DAG.getNode(OPCODE, DL, XLenVT, Op.getOperand(1),                   \
-                       Op.getOperand(2));                                      \
-  }
-      RISCV_MOPRR_64_CASE(0, RISCVISD::MOPRR0)
-      RISCV_MOPRR_64_CASE(1, RISCVISD::MOPRR1)
-      RISCV_MOPRR_64_CASE(2, RISCVISD::MOPRR2)
-      RISCV_MOPRR_64_CASE(3, RISCVISD::MOPRR3)
-      RISCV_MOPRR_64_CASE(4, RISCVISD::MOPRR4)
-      RISCV_MOPRR_64_CASE(5, RISCVISD::MOPRR5)
-      RISCV_MOPRR_64_CASE(6, RISCVISD::MOPRR6)
-      RISCV_MOPRR_64_CASE(7, RISCVISD::MOPRR7)
-#undef RISCV_MOPRR_64_CASE
+    if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
+      SDValue NewOp0 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
+      SDValue NewOp1 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
+      SDValue Res = DAG.getNode(
+          RISCVISD::MOPRR, DL, MVT::i64, NewOp0, NewOp1,
+          DAG.getTargetConstant(Op.getConstantOperandVal(3), DL, MVT::i64));
+      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
     }
+    return DAG.getNode(RISCVISD::MOPRR, DL, XLenVT, Op.getOperand(1),
+                       Op.getOperand(2), Op.getOperand(3));
   }
   case Intrinsic::riscv_clmul:
     if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
@@ -11872,79 +11822,28 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       return;
     }
     case Intrinsic::riscv_mopr: {
-      unsigned mopr_id = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
-      switch (mopr_id) {
-#define RISCV_MOPR_CASE(MOPR_ID, OPCODE)                                       \
-  case MOPR_ID: {                                                              \
-    if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)                \
-      return;                                                                  \
-    SDValue NewOp =                                                            \
-        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));          \
-    SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp);                    \
-    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));          \
-    return;                                                                    \
-  }
-        RISCV_MOPR_CASE(0, RISCVISD::MOPR0)
-        RISCV_MOPR_CASE(1, RISCVISD::MOPR1)
-        RISCV_MOPR_CASE(2, RISCVISD::MOPR2)
-        RISCV_MOPR_CASE(3, RISCVISD::MOPR3)
-        RISCV_MOPR_CASE(4, RISCVISD::MOPR4)
-        RISCV_MOPR_CASE(5, RISCVISD::MOPR5)
-        RISCV_MOPR_CASE(6, RISCVISD::MOPR6)
-        RISCV_MOPR_CASE(7, RISCVISD::MOPR7)
-        RISCV_MOPR_CASE(8, RISCVISD::MOPR8)
-        RISCV_MOPR_CASE(9, RISCVISD::MOPR9)
-        RISCV_MOPR_CASE(10, RISCVISD::MOPR10)
-        RISCV_MOPR_CASE(11, RISCVISD::MOPR11)
-        RISCV_MOPR_CASE(12, RISCVISD::MOPR12)
-        RISCV_MOPR_CASE(13, RISCVISD::MOPR13)
-        RISCV_MOPR_CASE(14, RISCVISD::MOPR14)
-        RISCV_MOPR_CASE(15, RISCVISD::MOPR15)
-        RISCV_MOPR_CASE(16, RISCVISD::MOPR16)
-        RISCV_MOPR_CASE(17, RISCVISD::MOPR17)
-        RISCV_MOPR_CASE(18, RISCVISD::MOPR18)
-        RISCV_MOPR_CASE(19, RISCVISD::MOPR19)
-        RISCV_MOPR_CASE(20, RISCVISD::MOPR20)
-        RISCV_MOPR_CASE(21, RISCVISD::MOPR21)
-        RISCV_MOPR_CASE(22, RISCVISD::MOPR22)
-        RISCV_MOPR_CASE(23, RISCVISD::MOPR23)
-        RISCV_MOPR_CASE(24, RISCVISD::MOPR24)
-        RISCV_MOPR_CASE(25, RISCVISD::MOPR25)
-        RISCV_MOPR_CASE(26, RISCVISD::MOPR26)
-        RISCV_MOPR_CASE(27, RISCVISD::MOPR27)
-        RISCV_MOPR_CASE(28, RISCVISD::MOPR28)
-        RISCV_MOPR_CASE(29, RISCVISD::MOPR29)
-        RISCV_MOPR_CASE(30, RISCVISD::MOPR30)
-        RISCV_MOPR_CASE(31, RISCVISD::MOPR31)
-#undef RISCV_MOPR_CASE
-      }
+      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
+        return;
+      SDValue NewOp =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+      SDValue Res = DAG.getNode(
+          RISCVISD::MOPR, DL, MVT::i64, NewOp,
+          DAG.getTargetConstant(N->getConstantOperandVal(2), DL, MVT::i64));
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+      return;
     }
     case Intrinsic::riscv_moprr: {
-      unsigned moprr_id =
-          cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
-      switch (moprr_id) {
-#define RISCV_MOPRR_CASE(MOPRR_ID, OPCODE)                                     \
-  case MOPRR_ID: {                                                             \
-    if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)                \
-      return;                                                                  \
-    SDValue NewOp0 =                                                           \
-        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));          \
-    SDValue NewOp1 =                                                           \
-        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));          \
-    SDValue Res = DAG.getNode(OPCODE, DL, MVT::i64, NewOp0, NewOp1);           \
-    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));          \
-    return;                                                                    \
-  }
-        RISCV_MOPRR_CASE(0, RISCVISD::MOPRR0)
-        RISCV_MOPRR_CASE(1, RISCVISD::MOPRR1)
-        RISCV_MOPRR_CASE(2, RISCVISD::MOPRR2)
-        RISCV_MOPRR_CASE(3, RISCVISD::MOPRR3)
-        RISCV_MOPRR_CASE(4, RISCVISD::MOPRR4)
-        RISCV_MOPRR_CASE(5, RISCVISD::MOPRR5)
-        RISCV_MOPRR_CASE(6, RISCVISD::MOPRR6)
-        RISCV_MOPRR_CASE(7, RISCVISD::MOPRR7)
-#undef RISCV_MOPRR_CASE
-      }
+      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
+        return;
+      SDValue NewOp0 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+      SDValue NewOp1 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
+      SDValue Res = DAG.getNode(
+          RISCVISD::MOPRR, DL, MVT::i64, NewOp0, NewOp1,
+          DAG.getTargetConstant(N->getConstantOperandVal(3), DL, MVT::i64));
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+      return;
     }
     case Intrinsic::riscv_clmul: {
       if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
@@ -18701,46 +18600,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(CLMUL)
   NODE_NAME_CASE(CLMULH)
   NODE_NAME_CASE(CLMULR)
-  NODE_NAME_CASE(MOPR0)
-  NODE_NAME_CASE(MOPR1)
-  NODE_NAME_CASE(MOPR2)
-  NODE_NAME_CASE(MOPR3)
-  NODE_NAME_CASE(MOPR4)
-  NODE_NAME_CASE(MOPR5)
-  NODE_NAME_CASE(MOPR6)
-  NODE_NAME_CASE(MOPR7)
-  NODE_NAME_CASE(MOPR8)
-  NODE_NAME_CASE(MOPR9)
-  NODE_NAME_CASE(MOPR10)
-  NODE_NAME_CASE(MOPR11)
-  NODE_NAME_CASE(MOPR12)
-  NODE_NAME_CASE(MOPR13)
-  NODE_NAME_CASE(MOPR14)
-  NODE_NAME_CASE(MOPR15)
-  NODE_NAME_CASE(MOPR16)
-  NODE_NAME_CASE(MOPR17)
-  NODE_NAME_CASE(MOPR18)
-  NODE_NAME_CASE(MOPR19)
-  NODE_NAME_CASE(MOPR20)
-  NODE_NAME_CASE(MOPR21)
-  NODE_NAME_CASE(MOPR22)
-  NODE_NAME_CASE(MOPR23)
-  NODE_NAME_CASE(MOPR24)
-  NODE_NAME_CASE(MOPR25)
-  NODE_NAME_CASE(MOPR26)
-  NODE_NAME_CASE(MOPR27)
-  NODE_NAME_CASE(MOPR28)
-  NODE_NAME_CASE(MOPR29)
-  NODE_NAME_CASE(MOPR30)
-  NODE_NAME_CASE(MOPR31)
-  NODE_NAME_CASE(MOPRR0)
-  NODE_NAME_CASE(MOPRR1)
-  NODE_NAME_CASE(MOPRR2)
-  NODE_NAME_CASE(MOPRR3)
-  NODE_NAME_CASE(MOPRR4)
-  NODE_NAME_CASE(MOPRR5)
-  NODE_NAME_CASE(MOPRR6)
-  NODE_NAME_CASE(MOPRR7)
+  NODE_NAME_CASE(MOPR)
+  NODE_NAME_CASE(MOPRR)
   NODE_NAME_CASE(SHA256SIG0)
   NODE_NAME_CASE(SHA256SIG1)
   NODE_NAME_CASE(SHA256SUM0)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 4fa8e50d141b51..f39cdbaa8e5484 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -143,11 +143,7 @@ enum NodeType : unsigned {
   SM3P0, SM3P1,
 
   // May-Be-Operations
-  MOPR0, MOPR1, MOPR2, MOPR3, MOPR4, MOPR5, MOPR6, MOPR7, MOPR8, MOPR9, MOPR10,
-  MOPR11, MOPR12, MOPR13, MOPR14, MOPR15, MOPR16, MOPR17, MOPR18, MOPR19,
-  MOPR20, MOPR21, MOPR22, MOPR23, MOPR24, MOPR25, MOPR26, MOPR27, MOPR28,
-  MOPR29, MOPR30, MOPR31, MOPRR0, MOPRR1, MOPRR2, MOPRR3, MOPRR4, MOPRR5,
-  MOPRR6, MOPRR7,
+  MOPR, MOPRR,
 
   // Vector Extension
   FIRST_VL_VECTOR_OP,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td
index 8f60b1badcb4af..267c7a734fa96f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td
@@ -34,14 +34,13 @@ class RVInstRMoprr<bits<4> imm4, bits<3> imm3, bits<3> funct3, RISCVOpcode opcod
   let Inst{25} = imm4{0};
 }
 
-foreach i = 0...31 in {
-  defvar riscvisd_moprx = "RISCVISD::MOPR"#i;
-  def riscv_mopr#i : SDNode<riscvisd_moprx,  SDTIntUnaryOp>;
-}
-foreach i = 0...7 in {
-  defvar riscvisd_moprrx = "RISCVISD::MOPRR"#i;
-  def riscv_moprr#i : SDNode<riscvisd_moprrx,  SDTIntBinOp>;
-}
+def riscv_mopr  : SDNode<"RISCVISD::MOPR",
+                         SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
+                                              SDTCisSameAs<0, 2>]>>;
+def riscv_moprr : SDNode<"RISCVISD::MOPRR",
+                         SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
+                                              SDTCisSameAs<0, 2>,
+                                              SDTCisSameAs<0, 3>]>>;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 class RVMopr<bits<7> imm7, bits<5> imm5, bits<3> funct3,
@@ -67,21 +66,16 @@ foreach i = 0...7 in {
                 Sched<[]>;
 }
 
+let Predicates = [HasStdExtZimop] in {
 // Zimop instructions
 foreach i = 0...31 in {
-    defvar moprx = !cast<Instruction>("MOPR"#i);
-    defvar riscv_moprx = !cast<SDNode>("riscv_mopr"#i);
-    let Predicates = [HasStdExtZimop] in {
-    def : Pat<(XLenVT (riscv_moprx (XLenVT GPR:$rs1))),
-              (moprx GPR:$rs1)>;
-    } // Predicates = [HasStdExtZimop]
+  def : Pat<(XLenVT (riscv_mopr GPR:$rs1, (XLenVT i))),
+            (!cast<Instruction>("MOPR"#i) GPR:$rs1)>;
 }
 
 foreach i = 0...7 in {
-    defvar moprrx = !cast<Instruction>("MOPRR"#i);
-    defvar riscv_moprrx = !cast<SDNode>("riscv_moprr"#i);
-    let Predicates = [HasStdExtZimop] in {
-    def : Pat<(XLenVT (riscv_moprrx (XLenVT GPR:$rs1), (XLenVT GPR:$rs2))),
-              (moprrx GPR:$rs1, GPR:$rs2)>;
-    } // Predicates = [HasStdExtZimop]
+  def : Pat<(XLenVT (riscv_moprr GPR:$rs1, GPR:$rs2, (XLenVT i))),
+            (!cast<Instruction>("MOPRR"#i) GPR:$rs1, GPR:$rs2)>;
 }
+
+} // Predicates = [HasStdExtZimop]
\ No newline at end of file



More information about the llvm-commits mailing list