[llvm] [RISCV] Intrinsic Support for XCVsimd (PR #118557)

via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 3 14:27:44 PST 2024


llvmbot wrote:


@llvm/pr-subscribers-llvm-ir

@llvm/pr-subscribers-backend-risc-v

Author: None (realqhc)

Changes:

Add intrinsic support for the XCVsimd extension.

Documentation: https://docs.openhwgroup.org/projects/cv32e40p-user-manual/en/latest/instruction_set_extensions.html#simd

Co-authored-by: @melonedo
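
The new intrinsics map one-to-one onto the corresponding cv.* instructions. A minimal usage sketch, adapted from the test file in this patch:

```llvm
; Packed lanes are carried in plain i32 values: 2 x i16 for the _h
; intrinsics and 4 x i8 for the _b intrinsics.
declare i32 @llvm.riscv.cv.simd.add.b(i32, i32)

define i32 @example(i32 %a, i32 %b) {
  ; With -mattr=+xcvsimd this selects to: cv.add.b a0, a0, a1
  %1 = call i32 @llvm.riscv.cv.simd.add.b(i32 %a, i32 %b)
  ret i32 %1
}
```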

---

Patch is 79.67 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/118557.diff


4 Files Affected:

- (modified) llvm/include/llvm/IR/IntrinsicsRISCVXCV.td (+124) 
- (modified) llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp (+22) 
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td (+227) 
- (added) llvm/test/CodeGen/RISCV/xcvsimd.ll (+2266) 


``````````diff
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td b/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td
index 6e7e90438c6211..2f8e7acec5b347 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXCV.td
@@ -42,6 +42,70 @@ class ScalarCoreVMacGprGprGprImmIntrinsic
   : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
               [IntrNoMem, IntrWillReturn, IntrSpeculatable, ImmArg<ArgIndex<3>>]>;
 
+class ScalarCoreVSimdGprIntrinsic
+    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCoreVSimdGprGprIntrinsic
+    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCoreVSimdGprImmIntrinsic
+    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
+
+class ScalarCoreVSimdGprGprGprIntrinsic
+    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
+class ScalarCoreVSimdGprGprImmIntrinsic
+    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;
+
+class ScalarCoreVSimdGprGprGprImmIntrinsic
+    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+                [IntrNoMem, IntrWillReturn, IntrSpeculatable, ImmArg<ArgIndex<3>>]>;
+
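+// Each *HB multiclass instantiates both the halfword (_h) and byte (_b)
+// forms of an intrinsic.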
+multiclass ScalarCoreVSimdGprIntrinsicHB {
+  def int_riscv_cv_simd_ # NAME # _h : ScalarCoreVSimdGprIntrinsic;
+  def int_riscv_cv_simd_ # NAME # _b : ScalarCoreVSimdGprIntrinsic;
+}
+
+multiclass ScalarCoreVSimdGprGprIntrinsicHB {
+  def int_riscv_cv_simd_ # NAME # _h : ScalarCoreVSimdGprGprIntrinsic;
+  def int_riscv_cv_simd_ # NAME # _b : ScalarCoreVSimdGprGprIntrinsic;
+}
+
+multiclass ScalarCoreVSimdGprGprGprIntrinsicHB {
+  def int_riscv_cv_simd_ # NAME # _h : ScalarCoreVSimdGprGprGprIntrinsic;
+  def int_riscv_cv_simd_ # NAME # _b : ScalarCoreVSimdGprGprGprIntrinsic;
+}
+
+multiclass ScalarCoreVSimdGprGprIntrinsicDiv {
+  def int_riscv_cv_simd_ # NAME # _div2 : ScalarCoreVSimdGprGprIntrinsic;
+  def int_riscv_cv_simd_ # NAME # _div4 : ScalarCoreVSimdGprGprIntrinsic;
+  def int_riscv_cv_simd_ # NAME # _div8 : ScalarCoreVSimdGprGprIntrinsic;
+}
+
+multiclass ScalarCoreVSimdGprImmIntrinsicHB {
+  def int_riscv_cv_simd_ # NAME # _h : ScalarCoreVSimdGprImmIntrinsic;
+  def int_riscv_cv_simd_ # NAME # _b : ScalarCoreVSimdGprImmIntrinsic;
+}
+
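+// A binary operation has vector-vector (_h/_b) and vector-scalar (_sc_h/_sc_b)
+// intrinsics. When exclude_h is set, the halfword vector-vector form is left
+// for the caller to define: cv.add.h and cv.sub.h share one intrinsic with
+// their .div variants and so take an extra immediate operand (see below).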
+multiclass CoreVSimdBinary <bit exclude_h = false> {
+  if exclude_h then {
+    def int_riscv_cv_simd_ # NAME # _b : ScalarCoreVSimdGprGprIntrinsic;
+  } else {
+    defm NAME : ScalarCoreVSimdGprGprIntrinsicHB;
+  }
+  defm NAME # _sc : ScalarCoreVSimdGprGprIntrinsicHB;
+}
+
+multiclass CoreVSimdTernary {
+  defm NAME : ScalarCoreVSimdGprGprGprIntrinsicHB;
+  defm NAME # _sc : ScalarCoreVSimdGprGprGprIntrinsicHB;
+}
+
 let TargetPrefix = "riscv" in {
   def int_riscv_cv_bitmanip_extract : ScalarCoreVBitManipGprGprIntrinsic;
   def int_riscv_cv_bitmanip_extractu : ScalarCoreVBitManipGprGprIntrinsic;
@@ -90,4 +154,64 @@ let TargetPrefix = "riscv" in {
   def int_riscv_cv_mac_machhuRN : ScalarCoreVMacGprGprGprImmIntrinsic;
   def int_riscv_cv_mac_macsRN   : ScalarCoreVMacGprGprGprImmIntrinsic;
   def int_riscv_cv_mac_machhsRN : ScalarCoreVMacGprGprGprImmIntrinsic;
+
+  defm add : CoreVSimdBinary<true>;
+  def int_riscv_cv_simd_add_h : ScalarCoreVSimdGprGprImmIntrinsic;
+  defm sub : CoreVSimdBinary<true>;
+  def int_riscv_cv_simd_sub_h : ScalarCoreVSimdGprGprImmIntrinsic;
+  defm avg : CoreVSimdBinary;
+  defm avgu : CoreVSimdBinary;
+  defm min : CoreVSimdBinary;
+  defm minu : CoreVSimdBinary;
+  defm max : CoreVSimdBinary;
+  defm maxu : CoreVSimdBinary;
+  defm srl : CoreVSimdBinary;
+  defm sra : CoreVSimdBinary;
+  defm sll : CoreVSimdBinary;
+  defm or : CoreVSimdBinary;
+  defm xor : CoreVSimdBinary;
+  defm and : CoreVSimdBinary;
+
+  defm abs : ScalarCoreVSimdGprIntrinsicHB;
+
+  defm dotup : CoreVSimdBinary;
+  defm dotusp : CoreVSimdBinary;
+  defm dotsp : CoreVSimdBinary;
+  defm sdotup : CoreVSimdTernary;
+  defm sdotusp : CoreVSimdTernary;
+  defm sdotsp : CoreVSimdTernary;
+
+  defm extract : ScalarCoreVSimdGprImmIntrinsicHB;
+  defm extractu : ScalarCoreVSimdGprImmIntrinsicHB;
+  def int_riscv_cv_simd_insert_b : ScalarCoreVSimdGprGprImmIntrinsic;
+  def int_riscv_cv_simd_insert_h : ScalarCoreVSimdGprGprImmIntrinsic;
+
+  defm shuffle : ScalarCoreVSimdGprGprIntrinsicHB;
+  def int_riscv_cv_simd_shuffle_sci_h : ScalarCoreVSimdGprImmIntrinsic;
+  def int_riscv_cv_simd_shuffle_sci_b : ScalarCoreVSimdGprImmIntrinsic;
+  defm shuffle2 : ScalarCoreVSimdGprGprGprIntrinsicHB;
+
+  def int_riscv_cv_simd_packhi_h : ScalarCoreVSimdGprGprIntrinsic;
+  def int_riscv_cv_simd_packlo_h : ScalarCoreVSimdGprGprIntrinsic;
+  def int_riscv_cv_simd_packhi_b : ScalarCoreVSimdGprGprGprIntrinsic;
+  def int_riscv_cv_simd_packlo_b : ScalarCoreVSimdGprGprGprIntrinsic;
+
+  defm cmpeq : CoreVSimdBinary;
+  defm cmpne : CoreVSimdBinary;
+  defm cmpgt : CoreVSimdBinary;
+  defm cmpge : CoreVSimdBinary;
+  defm cmplt : CoreVSimdBinary;
+  defm cmple : CoreVSimdBinary;
+  defm cmpgtu : CoreVSimdBinary;
+  defm cmpgeu : CoreVSimdBinary;
+  defm cmpltu : CoreVSimdBinary;
+  defm cmpleu : CoreVSimdBinary;
+
+  def int_riscv_cv_simd_cplxmul_r : ScalarCoreVSimdGprGprGprImmIntrinsic;
+  def int_riscv_cv_simd_cplxmul_i : ScalarCoreVSimdGprGprGprImmIntrinsic;
+
+  def int_riscv_cv_simd_cplxconj : ScalarCoreVSimdGprIntrinsic;
+
+  def int_riscv_cv_simd_subrotmj : ScalarCoreVSimdGprGprImmIntrinsic;
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index eb3e1a1fe9fd5e..08cabc7c47faae 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -56,6 +56,8 @@ class RISCVExpandPseudo : public MachineFunctionPass {
                             MachineBasicBlock::iterator MBBI);
   bool expandRV32ZdinxLoad(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI);
+  bool expandVendorXcvsimdShuffle(MachineBasicBlock &MBB,
+                                  MachineBasicBlock::iterator MBBI);
 #ifndef NDEBUG
   unsigned getInstSizeInBytes(const MachineFunction &MF) const {
     unsigned Size = 0;
@@ -164,6 +166,8 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
   case RISCV::PseudoVMSET_M_B64:
     // vmset.m vd => vmxnor.mm vd, vd, vd
     return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXNOR_MM);
+  case RISCV::CV_SHUFFLE_SCI_B_PSEUDO:
+    return expandVendorXcvsimdShuffle(MBB, MBBI);
   }
 
   return false;
@@ -415,6 +419,24 @@ bool RISCVExpandPseudo::expandRV32ZdinxLoad(MachineBasicBlock &MBB,
   return true;
 }
 
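+// Expand CV_SHUFFLE_SCI_B_PSEUDO: bits 7:6 of the 8-bit immediate select one
+// of the four cv.shufflei<N>.sci.b opcodes, and bits 5:0 become the
+// instruction's sign-extended 6-bit immediate.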
+bool RISCVExpandPseudo::expandVendorXcvsimdShuffle(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) {
+  DebugLoc DL = MBBI->getDebugLoc();
+  Register DstReg = MBBI->getOperand(0).getReg();
+  Register SrcReg = MBBI->getOperand(1).getReg();
+  uint8_t Imm = MBBI->getOperand(2).getImm();
+  const unsigned Opcodes[] = {
+      RISCV::CV_SHUFFLEI0_SCI_B, RISCV::CV_SHUFFLEI1_SCI_B,
+      RISCV::CV_SHUFFLEI2_SCI_B, RISCV::CV_SHUFFLEI3_SCI_B};
+  const MCInstrDesc &Desc = TII->get(Opcodes[Imm >> 6]);
+  BuildMI(MBB, MBBI, DL, Desc, DstReg)
+      .addReg(SrcReg)
+      .addImm(SignExtend64<6>(Imm));
+  MBBI->eraseFromParent();
+  return true;
+}
+
 class RISCVPreRAExpandPseudo : public MachineFunctionPass {
 public:
   const RISCVSubtarget *STI;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
index 4478e246111080..76e8a378e2935d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td
@@ -864,3 +864,230 @@ let Predicates = [HasVendorXCVmac] in {
   def : PatCoreVMacGprGprGprUimm5<"macsRN", "MACSRN">;
   def : PatCoreVMacGprGprGprUimm5<"machhsRN", "MACHHSRN">;
 }
+
+//===----------------------------------------------------------------------===//
+// Patterns for SIMD operations
+//===----------------------------------------------------------------------===//
+
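+// cv_tsimm6 is a TImmLeaf: the intrinsic operands it matches are ImmArgs,
+// which reach instruction selection as target constants.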
+def cv_tsimm6 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isInt<6>(Imm);}]> {
+  let ParserMatchClass = SImmAsmOperand<6>;
+  let EncoderMethod = "getImmOpValue";
+  let DecoderMethod = "decodeSImmOperand<6>";
+  let OperandType = "OPERAND_SIMM6";
+  let MCOperandPredicate = [{
+    int64_t Imm;
+    return MCOp.evaluateAsConstantImm(Imm) && isInt<6>(Imm);
+  }];
+  let OperandNamespace = "RISCVOp";
+}
+
+class CVUImmAsmOperand<int width> : AsmOperandClass {
+  let Name = "CVUImm" # width;
+  let RenderMethod = "addImmOperands";
+  let DiagnosticType = !strconcat("Invalid", Name);
+}
+
+def cv_uimm6 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<6>(Imm);}]> {
+  let ParserMatchClass = UImmAsmOperand<6>;
+  let EncoderMethod = "getImmOpValue";
+  let DecoderMethod = "decodeUImmOperand<6>";
+  let OperandType = "OPERAND_UIMM6";
+  let MCOperandPredicate = [{
+    int64_t Imm;
+    return MCOp.evaluateAsConstantImm(Imm) && isUInt<6>(Imm);
+  }];
+  let OperandNamespace = "RISCVOp";
+}
+
+def cv_imm8 : Operand<XLenVT>, TImmLeaf<XLenVT, [{return isUInt<8>(Imm);}]>;
+
+class PatCorevGprGpr <string intr, string asm> :
+  PatGprGpr<!cast<Intrinsic>("int_riscv_cv_simd_" # intr),
+            !cast<RVInst>("CV_" # asm)>;
+
+// Note that rd, which the instruction both reads and writes, is the last
+// operand of the intrinsic.
+class PatCorevGprGprGpr <string intr, string asm> :
+  Pat<(!cast<Intrinsic>("int_riscv_cv_simd_" # intr) GPR:$rs1, GPR:$rs2, GPR:$rd),
+      (!cast<RVInst>("CV_" # asm) GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+
+class PatCorevGprTImm <string intr, string asm> :
+  PatGprImm<!cast<Intrinsic>("int_riscv_cv_simd_" # intr),
+            !cast<RVInst>("CV_" # asm), cv_tsimm6>;
+
+class PatCorevGprImm <string intr, string asm> :
+  PatGprImm<!cast<Intrinsic>("int_riscv_cv_simd_" # intr),
+            !cast<RVInst>("CV_" # asm), simm6>;
+
+class PatCorevGprUImm <string intr, string asm> :
+  PatGprImm<!cast<Intrinsic>("int_riscv_cv_simd_" # intr),
+            !cast<RVInst>("CV_" # asm), cv_uimm6>;
+
+multiclass PatCorevGprGprHB <string intr> {
+  def : PatCorevGprGpr<intr # "_h", NAME # "_H">;
+  def : PatCorevGprGpr<intr # "_b", NAME # "_B">;
+}
+
+multiclass PatCorevGprGprGprHB <string intr> {
+  def : PatCorevGprGprGpr<intr # "_h", NAME # "_H">;
+  def : PatCorevGprGprGpr<intr # "_b", NAME # "_B">;
+}
+
+multiclass PatCorevGprTImmHB <string intr> {
+  def : PatCorevGprTImm<intr # "_h", NAME # "_H">;
+  def : PatCorevGprTImm<intr # "_b", NAME # "_B">;
+}
+
+multiclass PatCorevGprImmHB <string intr> {
+  def : PatCorevGprImm<intr # "_h", NAME # "_H">;
+  def : PatCorevGprImm<intr # "_b", NAME # "_B">;
+}
+
+multiclass PatCorevGprUImmHB <string intr> {
+  def : PatCorevGprUImm<intr # "_h", NAME # "_H">;
+  def : PatCorevGprUImm<intr # "_b", NAME # "_B">;
+}
+
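+// The trailing immediate operand of the div-capable intrinsics selects the
+// instruction form: 0 is the plain operation; 1, 2 and 3 select the DIV2,
+// DIV4 and DIV8 variants.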
+class PatCorevGprGprDiv <Intrinsic intr, string asm, int div> :
+  Pat<(intr GPR:$rs1, GPR:$rs2, (i32 div)),
+      (!cast<RVInst>("CV_" # asm) GPR:$rs1, GPR:$rs2)>;
+
+class PatCorevGprGprGprDiv <Intrinsic intr, string asm, int div> :
+  Pat<(intr GPR:$rd, GPR:$rs1, GPR:$rs2, (i32 div)),
+      (!cast<RVInst>("CV_" # asm) GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+
+class PatCorevGprShuffle <Intrinsic intr, RVInst inst, int s> :
+  Pat<(intr GPR:$rs1, (i32 s)),
+      (inst GPR:$rs1, 0)>;
+
+multiclass PatCorevGprGprDivAll <Intrinsic intr> {
+  def : PatCorevGprGprDiv<intr, NAME # "_DIV2", 1>;
+  def : PatCorevGprGprDiv<intr, NAME # "_DIV4", 2>;
+  def : PatCorevGprGprDiv<intr, NAME # "_DIV8", 3>;
+}
+
+multiclass PatCorevGprGprGprDivAll <Intrinsic intr> {
+  def : PatCorevGprGprGprDiv<intr, NAME # "_DIV2", 1>;
+  def : PatCorevGprGprGprDiv<intr, NAME # "_DIV4", 2>;
+  def : PatCorevGprGprGprDiv<intr, NAME # "_DIV8", 3>;
+}
+
+class PatCorevGpr <string intr, string asm> :
+  PatGpr<!cast<Intrinsic>("int_riscv_cv_simd_" # intr),
+         !cast<RVInst>("CV_" # asm)>;
+
+multiclass PatCorevGprHB <string intr> {
+  def : PatCorevGpr<intr # "_h", NAME # "_H">;
+  def : PatCorevGpr<intr # "_b", NAME # "_B">;
+}
+
+multiclass PatCorevBinary <string intr, bit exclude_h = false> {
+  if exclude_h then {
+    def : PatCorevGprGpr<intr # "_b", NAME # "_B">;
+  } else {
+    defm NAME : PatCorevGprGprHB<intr>;
+  }
+  defm NAME # "_SC" : PatCorevGprGprHB<intr # "_sc">;
+  defm NAME # "_SCI" : PatCorevGprImmHB<intr # "_sc">;
+}
+
+multiclass PatCorevBinaryUnsigned <string intr> {
+  defm NAME : PatCorevGprGprHB<intr>;
+  defm NAME # "_SC" : PatCorevGprGprHB<intr # "_sc">;
+  defm NAME # "_SCI" : PatCorevGprUImmHB<intr # "_sc">;
+}
+
+multiclass PatCorevTernary <string intr> {
+  defm NAME : PatCorevGprGprGprHB<intr>;
+  defm NAME # "_SC" : PatCorevGprGprGprHB<intr # "_sc">;
+  def : Pat<(!cast<Intrinsic>("int_riscv_cv_simd_" # intr # "_sc_h") GPR:$rs1, simm6:$rs2, GPR:$rd),
+            (!cast<RVInst>("CV_" # NAME # "_SCI_H") GPR:$rd, GPR:$rs1, simm6:$rs2)>;
+  def : Pat<(!cast<Intrinsic>("int_riscv_cv_simd_" # intr # "_sc_b") GPR:$rs1, simm6:$rs2, GPR:$rd),
+            (!cast<RVInst>("CV_" # NAME # "_SCI_B") GPR:$rd, GPR:$rs1, simm6:$rs2)>;
+}
+
+multiclass PatCorevTernaryUnsigned <string intr> {
+  defm NAME : PatCorevGprGprGprHB<intr>;
+  defm NAME # "_SC" : PatCorevGprGprGprHB<intr # "_sc">;
+  def : Pat<(!cast<Intrinsic>("int_riscv_cv_simd_" # intr # "_sc_h") GPR:$rs1, cv_uimm6:$rs2, GPR:$rd),
+            (!cast<RVInst>("CV_" # NAME # "_SCI_H") GPR:$rd, GPR:$rs1, cv_uimm6:$rs2)>;
+  def : Pat<(!cast<Intrinsic>("int_riscv_cv_simd_" # intr # "_sc_b") GPR:$rs1, cv_uimm6:$rs2, GPR:$rd),
+            (!cast<RVInst>("CV_" # NAME # "_SCI_B") GPR:$rd, GPR:$rs1, cv_uimm6:$rs2)>;
+}
+
+let Predicates = [HasVendorXCVsimd] in {
+  defm ADD :  PatCorevBinary<"add", true>;
+  def : PatCorevGprGprDiv<int_riscv_cv_simd_add_h, "ADD_H", 0>;
+  defm SUB :  PatCorevBinary<"sub", true>;
+  def : PatCorevGprGprDiv<int_riscv_cv_simd_sub_h, "SUB_H", 0>;
+  defm AVG :  PatCorevBinary<"avg">;
+  defm AVGU : PatCorevBinaryUnsigned<"avgu">;
+  defm MIN :  PatCorevBinary<"min">;
+  defm MINU : PatCorevBinaryUnsigned<"minu">;
+  defm MAX :  PatCorevBinary<"max">;
+  defm MAXU : PatCorevBinaryUnsigned<"maxu">;
+  defm SRL :  PatCorevBinaryUnsigned<"srl">;
+  defm SRA :  PatCorevBinaryUnsigned<"sra">;
+  defm SLL :  PatCorevBinaryUnsigned<"sll">;
+  defm OR :   PatCorevBinary<"or">;
+  defm XOR :  PatCorevBinary<"xor">;
+  defm AND :  PatCorevBinary<"and">;
+
+  defm ABS : PatCorevGprHB<"abs">;
+
+  defm DOTUP :   PatCorevBinaryUnsigned<"dotup">;
+  defm DOTUSP :  PatCorevBinary<"dotusp">;
+  defm DOTSP :   PatCorevBinary<"dotsp">;
+  defm SDOTUP :  PatCorevTernaryUnsigned<"sdotup">;
+  defm SDOTUSP : PatCorevTernary<"sdotusp">;
+  defm SDOTSP :  PatCorevTernary<"sdotsp">;
+
+  defm EXTRACT :  PatCorevGprTImmHB<"extract">;
+  defm EXTRACTU : PatCorevGprTImmHB<"extractu">;
+  def : Pat<(int_riscv_cv_simd_insert_b GPR:$rd, GPR:$rs1, cv_tsimm6:$imm),
+            (CV_INSERT_B GPR:$rd, GPR:$rs1, cv_tsimm6:$imm)>;
+  def : Pat<(int_riscv_cv_simd_insert_h GPR:$rd, GPR:$rs1, cv_tsimm6:$imm),
+            (CV_INSERT_H GPR:$rd, GPR:$rs1, cv_tsimm6:$imm)>;
+
+  defm SHUFFLE : PatCorevGprGprHB<"shuffle">;
+  def : PatCorevGprTImm<"shuffle_sci_h", "SHUFFLE_SCI_H">;
+
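+  // cv.shufflei<N>.sci.b spreads its 8-bit immediate across four opcodes, so
+  // match the intrinsic to a pseudo here and pick the real opcode during
+  // pseudo expansion (see RISCVExpandPseudoInsts.cpp).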
+  def CV_SHUFFLE_SCI_B_PSEUDO : Pseudo<(outs GPR:$rd), (ins GPR:$rs, cv_imm8:$imm), []>;
+  def : PatGprImm<int_riscv_cv_simd_shuffle_sci_b, CV_SHUFFLE_SCI_B_PSEUDO, cv_imm8>;
+
+  def : Pat<(int_riscv_cv_simd_shuffle2_h GPR:$rs1, GPR:$rs2, GPR:$rd),
+            (CV_SHUFFLE2_H GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+  def : Pat<(int_riscv_cv_simd_shuffle2_b GPR:$rs1, GPR:$rs2, GPR:$rd),
+            (CV_SHUFFLE2_B GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+
+  def : PatCorevGprGpr<"packhi_h", "PACK_H">;
+  def : PatCorevGprGpr<"packlo_h", "PACK">;
+  def : Pat<(int_riscv_cv_simd_packhi_b GPR:$rd, GPR:$rs1, GPR:$rs2),
+            (CV_PACKHI_B GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+  def : Pat<(int_riscv_cv_simd_packlo_b GPR:$rd, GPR:$rs1, GPR:$rs2),
+            (CV_PACKLO_B GPR:$rd, GPR:$rs1, GPR:$rs2)>;
+
+  defm CMPEQ :  PatCorevBinary<"cmpeq">;
+  defm CMPNE :  PatCorevBinary<"cmpne">;
+  defm CMPGT :  PatCorevBinary<"cmpgt">;
+  defm CMPGE :  PatCorevBinary<"cmpge">;
+  defm CMPLT :  PatCorevBinary<"cmplt">;
+  defm CMPLE :  PatCorevBinary<"cmple">;
+  defm CMPGTU : PatCorevBinaryUnsigned<"cmpgtu">;
+  defm CMPGEU : PatCorevBinaryUnsigned<"cmpgeu">;
+  defm CMPLTU : PatCorevBinaryUnsigned<"cmpltu">;
+  defm CMPLEU : PatCorevBinaryUnsigned<"cmpleu">;
+
+  def : PatCorevGprGprGprDiv<int_riscv_cv_simd_cplxmul_r, "CPLXMUL_R", 0>;
+  defm CPLXMUL_R : PatCorevGprGprGprDivAll<int_riscv_cv_simd_cplxmul_r>;
+  def : PatCorevGprGprGprDiv<int_riscv_cv_simd_cplxmul_i, "CPLXMUL_I", 0>;
+  defm CPLXMUL_I : PatCorevGprGprGprDivAll<int_riscv_cv_simd_cplxmul_i>;
+
+  def : PatCorevGpr<"cplxconj", "CPLXCONJ">;
+
+  def : PatCorevGprGprDiv<int_riscv_cv_simd_subrotmj, "SUBROTMJ", 0>;
+  defm SUBROTMJ : PatCorevGprGprDivAll<int_riscv_cv_simd_subrotmj>;
+
+  defm ADD : PatCorevGprGprDivAll<int_riscv_cv_simd_add_h>;
+  defm SUB : PatCorevGprGprDivAll<int_riscv_cv_simd_sub_h>;
+} // Predicates = [HasVendorXCVsimd]
\ No newline at end of file
diff --git a/llvm/test/CodeGen/RISCV/xcvsimd.ll b/llvm/test/CodeGen/RISCV/xcvsimd.ll
new file mode 100644
index 00000000000000..ab39e75ab32d91
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xcvsimd.ll
@@ -0,0 +1,2266 @@
+; RUN: llc -O0 -mtriple=riscv32 -mattr=+m -mattr=+xcvsimd -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s
+
+declare i32 @llvm.riscv.cv.simd.add.h(i32, i32, i32)
+
+define i32 @test.cv.add.h(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.add.h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.add.h a0, a0, a1
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.riscv.cv.simd.add.h(i32 %a, i32 %b, i32 0)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.simd.add.b(i32, i32)
+
+define i32 @test.cv.add.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.add.b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.add.b a0, a0, a1
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.riscv.cv.simd.add.b(i32 %a, i32 %b)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.simd.add.sc.h(i32, i32)
+
+define i32 @test.cv.add.sc.h(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.add.sc.h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.add.sc.h a0, a0, a1
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.riscv.cv.simd.add.sc.h(i32 %a, i32 %b)
+  ret i32 %1
+}
+
+define i32 @test.cv.add.sci.h(i32 %a) {
+; CHECK-LABEL: test.cv.add.sci.h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.add.sci.h a0, a0, 5
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.riscv.cv.simd.add.sc.h(i32 %a, i32 5)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.simd.add.sc.b(i32, i32)
+
+define i32 @test.cv.add.sc.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.add.sc.b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.add.sc.b a0, a0, a1
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.riscv.cv.simd.add.sc.b(i32 %a, i32 %b)
+  ret i32 %1
+}
+
+define i32 @test.cv.add.sci.b(i32 %a) {
+; CHECK-LABEL: test.cv.add.sci.b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.add.sci.b a0, a0, 5
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.riscv.cv.simd.add.sc.b(i32 %a, i32 5)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.simd.sub.h(i32, i32, i32)
+
+define i32 @test.cv.sub.h(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.sub.h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.sub.h a0, a0, a1
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.riscv.cv.simd.sub.h(i32 %a, i32 %b, i32 0)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.simd.sub.b(i32, i32)
+
+define i32 @test.cv.sub.b(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.sub.b:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.sub.b a0, a0, a1
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.riscv.cv.simd.sub.b(i32 %a, i32 %b)
+  ret i32 %1
+}
+
+declare i32 @llvm.riscv.cv.simd.sub.sc.h(i32, i32)
+
+define i32 @test.cv.sub.sc.h(i32 %a, i32 %b) {
+; CHECK-LABEL: test.cv.sub.sc.h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.sub.sc.h a0, a0, a1
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.riscv.cv.simd.sub.sc.h(i32 %a, i32 %b)
+  r...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/118557


More information about the llvm-commits mailing list