[llvm] [RISCV] Sink conversion from nfields/lmul to nf down one level in RISCVInstrInfoV.td. NFC (PR #179369)

via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 2 17:51:51 PST 2026


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-risc-v

Author: Craig Topper (topperc)

<details>
<summary>Changes</summary>

The nf field is encoded as nfields (or lmul) minus one. Use asserts to
verify this doesn't lose any information.

The asserts increase the number of lines, but I think this puts the class interfaces at a more logical level of abstraction than the raw encoding.

Stacked on #<!-- -->179365

---

Patch is 23.85 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/179369.diff


2 Files Affected:

- (modified) llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp (+4-4) 
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoV.td (+169-155) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
index 6d278106646a1..407fecf6d62d7 100644
--- a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
+++ b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
@@ -27,7 +27,7 @@ struct VXMemOpInfo {
   unsigned Log2IdxEEW : 3;
   unsigned IsOrdered : 1;
   unsigned IsStore : 1;
-  unsigned NF : 4;
+  unsigned NFields : 4;
   unsigned BaseInstr;
 };
 
@@ -269,7 +269,7 @@ unsigned RISCVInstrumentManager::getSchedClassID(
     // the DataEEW and DataEMUL are equal to SEW and LMUL, respectively.
     unsigned IndexEMUL = ((1 << VXMO->Log2IdxEEW) * LMUL) / SEW;
 
-    if (!VXMO->NF) {
+    if (!VXMO->NFields) {
       // Indexed Load / Store.
       if (VXMO->IsStore) {
         if (const auto *VXP = RISCV::getVSXPseudo(
@@ -286,12 +286,12 @@ unsigned RISCVInstrumentManager::getSchedClassID(
       // Segmented Indexed Load / Store.
       if (VXMO->IsStore) {
         if (const auto *VXP =
-                RISCV::getVSXSEGPseudo(VXMO->NF, /*Masked=*/0, VXMO->IsOrdered,
+                RISCV::getVSXSEGPseudo(VXMO->NFields, /*Masked=*/0, VXMO->IsOrdered,
                                        VXMO->Log2IdxEEW, LMUL, IndexEMUL))
           VPOpcode = VXP->Pseudo;
       } else {
         if (const auto *VXP =
-                RISCV::getVLXSEGPseudo(VXMO->NF, /*Masked=*/0, VXMO->IsOrdered,
+                RISCV::getVLXSEGPseudo(VXMO->NFields, /*Masked=*/0, VXMO->IsOrdered,
                                        VXMO->Log2IdxEEW, LMUL, IndexEMUL))
           VPOpcode = VXP->Pseudo;
       }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index e674a48957b43..b58d8e3edf774 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -263,76 +263,72 @@ class VLFSched<string lmul, bit forceMasked = 0> : SchedCommon<
 class VLFSchedMC: VLFSched<"WorstCase", forceMasked=1>;
 
 // Unit-Stride Segment Loads and Stores
-class VLSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVLSEG" #nf #"e" #eew #"_" #emul)],
-  [ReadVLDX], emul, eew, forceMasked
+class VLSEGSched<int nfields, int eew, string emul,bit forceMasked = 0>
+    : SchedCommon<[!cast<SchedWrite>("WriteVLSEG" # nfields #"e" #eew #"_" #emul)],
+                  [ReadVLDX], emul, eew, forceMasked
 >;
-class VLSEGSchedMC<int nf, int eew> : VLSEGSched<nf, eew, "WorstCase",
-                                                 forceMasked=1>;
-
-class VSSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVSSEG" # nf # "e" # eew # "_" # emul)],
-  [!cast<SchedRead>("ReadVSTEV_" #emul), ReadVSTX], emul, eew, forceMasked
->;
-class VSSEGSchedMC<int nf, int eew> : VSSEGSched<nf, eew, "WorstCase",
-                                                 forceMasked=1>;
-
-class VLSEGFFSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVLSEGFF" # nf # "e" # eew # "_" # emul)],
-  [ReadVLDX], emul, eew, forceMasked
->;
-class VLSEGFFSchedMC<int nf, int eew> : VLSEGFFSched<nf, eew, "WorstCase",
-                                                     forceMasked=1>;
+class VLSEGSchedMC<int nfields, int eew>
+    : VLSEGSched<nfields, eew, "WorstCase", forceMasked=1>;
+
+class VSSEGSched<int nfields, int eew, string emul, bit forceMasked = 0>
+    : SchedCommon<[!cast<SchedWrite>("WriteVSSEG" # nfields # "e" # eew # "_" # emul)],
+                  [!cast<SchedRead>("ReadVSTEV_" #emul), ReadVSTX], emul, eew,
+                  forceMasked>;
+class VSSEGSchedMC<int nfields, int eew>
+    : VSSEGSched<nfields, eew, "WorstCase", forceMasked=1>;
+
+class VLSEGFFSched<int nfields, int eew, string emul, bit forceMasked = 0>
+    : SchedCommon<[!cast<SchedWrite>("WriteVLSEGFF" # nfields # "e" # eew # "_" # emul)],
+                  [ReadVLDX], emul, eew, forceMasked>;
+class VLSEGFFSchedMC<int nfields, int eew>
+    : VLSEGFFSched<nfields, eew, "WorstCase", forceMasked=1>;
 
 // Strided Segment Loads and Stores
-class VLSSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVLSSEG" #nf #"e" #eew #"_" #emul)],
-  [ReadVLDX, ReadVLDSX], emul, eew, forceMasked
->;
-class VLSSEGSchedMC<int nf, int eew> : VLSSEGSched<nf, eew, "WorstCase",
-                                                   forceMasked=1>;
-
-class VSSSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVSSSEG" #nf #"e" #eew #"_" #emul)],
-  [!cast<SchedRead>("ReadVSTS" #eew #"V_" #emul),
-   ReadVSTX, ReadVSTSX], emul, eew, forceMasked
->;
-class VSSSEGSchedMC<int nf, int eew> : VSSSEGSched<nf, eew, "WorstCase",
-                                                   forceMasked=1>;
+class VLSSEGSched<int nfields, int eew, string emul, bit forceMasked = 0>
+    : SchedCommon<[!cast<SchedWrite>("WriteVLSSEG" # nfields #"e" #eew #"_" #emul)],
+                  [ReadVLDX, ReadVLDSX], emul, eew, forceMasked>;
+class VLSSEGSchedMC<int nfields, int eew>
+    : VLSSEGSched<nfields, eew, "WorstCase", forceMasked=1>;
+
+class VSSSEGSched<int nfields, int eew, string emul, bit forceMasked = 0>
+    : SchedCommon<[!cast<SchedWrite>("WriteVSSSEG" # nfields #"e" #eew #"_" #emul)],
+                  [!cast<SchedRead>("ReadVSTS" #eew #"V_" #emul), ReadVSTX, ReadVSTSX],
+                  emul, eew, forceMasked>;
+class VSSSEGSchedMC<int nfields, int eew>
+    : VSSSEGSched<nfields, eew, "WorstCase", forceMasked=1>;
 
 // Indexed Segment Loads and Stores
-class VLXSEGSched<int nf, int eew, bit isOrdered, string emul,
-                  bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVL" #!if(isOrdered, "O", "U") #"XSEG" #nf #"e" #eew #"_" #emul)],
-  [ReadVLDX, !cast<SchedRead>("ReadVLD" #!if(isOrdered, "O", "U") #"XV_" #emul)],
-  emul, eew, forceMasked
->;
-class VLXSEGSchedMC<int nf, int eew, bit isOrdered>:
-  VLXSEGSched<nf, eew, isOrdered, "WorstCase", forceMasked=1>;
+class VLXSEGSched<int nfields, int eew, bit isOrdered, string emul,
+                  bit forceMasked = 0>
+    : SchedCommon<[!cast<SchedWrite>("WriteVL" #!if(isOrdered, "O", "U") #"XSEG" # nfields #"e" #eew #"_" #emul)],
+                  [ReadVLDX, !cast<SchedRead>("ReadVLD" #!if(isOrdered, "O", "U") #"XV_" #emul)],
+                  emul, eew, forceMasked>;
+class VLXSEGSchedMC<int nfields, int eew, bit isOrdered>
+    : VLXSEGSched<nfields, eew, isOrdered, "WorstCase", forceMasked=1>;
 
 // Passes sew=0 instead of eew=0 since this pseudo does not follow MX_E form.
-class VSXSEGSched<int nf, int eew, bit isOrdered, string emul,
+class VSXSEGSched<int nfields, int eew, bit isOrdered, string emul,
                   bit forceMasked = 0> : SchedCommon<
-  [!cast<SchedWrite>("WriteVS" #!if(isOrdered, "O", "U") #"XSEG" #nf #"e" #eew #"_" #emul)],
+  [!cast<SchedWrite>("WriteVS" #!if(isOrdered, "O", "U") #"XSEG" #nfields #"e" #eew #"_" #emul)],
   [!cast<SchedRead>("ReadVST" #!if(isOrdered, "O", "U") #"X" #eew #"_" #emul),
    ReadVSTX, !cast<SchedRead>("ReadVST" #!if(isOrdered, "O", "U") #"XV_" #emul)],
   emul, sew=0, forceMasked=forceMasked
 >;
-class VSXSEGSchedMC<int nf, int eew, bit isOrdered>:
-  VSXSEGSched<nf, eew, isOrdered, "WorstCase", forceMasked=1>;
+class VSXSEGSchedMC<int nfields, int eew, bit isOrdered>:
+  VSXSEGSched<nfields, eew, isOrdered, "WorstCase", forceMasked=1>;
 
 class RISCVVXMemOpMC<bits<3> E, bit Ordered, bit Store, bits<4> N = 0> {
   bits<3> Log2EEW = E;
   bits<1> IsOrdered = Ordered;
   bits<1> IsStore = Store;
-  bits<4> NF = N;
+  bits<4> NFields = N;
   Instruction BaseInstr = !cast<Instruction>(NAME);
 }
 
 def RISCVBaseVXMemOpTable : GenericTable {
   let FilterClass = "RISCVVXMemOpMC";
   let CppTypeName = "VXMemOpInfo";
-  let Fields = ["Log2EEW", "IsOrdered", "IsStore", "NF", "BaseInstr"];
+  let Fields = ["Log2EEW", "IsOrdered", "IsStore", "NFields", "BaseInstr"];
   let PrimaryKey = ["BaseInstr"];
   let PrimaryKeyName = "getVXMemOpInfo";
 }
@@ -350,10 +346,12 @@ class VUnitStrideLoad<RISCVWidth width, string opcodestr>
 
 let vm = 1, RVVConstraint = NoConstraint in {
 // unit-stride whole register load vl<nf>r.v vd, (rs1)
-class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
-    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
+class VWholeLoad<int lmul, RISCVWidth width, string opcodestr, RegisterClass VRC>
+    : RVInstVLU<!sub(lmul, 1), width.Value{3}, LUMOPUnitStrideWholeReg,
                 width.Value{2-0}, (outs VRC:$vd), (ins GPRMemZeroOffset:$rs1),
                 opcodestr, "$vd, $rs1"> {
+  assert !and(!ge(lmul, 1), !le(lmul, 8)), "lmul must be 1-8";
+
   let Uses = [];
 }
 
@@ -385,31 +383,39 @@ class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
                 "$vd, $rs1, $vs2$vm">;
 
 // unit-stride segment load vd, (rs1), vm
-class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
-    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
+class VUnitStrideSegmentLoad<int nfields, RISCVWidth width, string opcodestr>
+    : RVInstVLU<!sub(nfields, 1), width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                 (outs VR:$vd),
-                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;
+                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm"> {
+  assert !and(!ge(nfields, 2), !le(nfields, 8)), "nfields must be 2-8";
+}
 
 // segment fault-only-first load vd, (rs1), vm
-class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
-    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
+class VUnitStrideSegmentLoadFF<int nfields, RISCVWidth width, string opcodestr>
+    : RVInstVLU<!sub(nfields, 1), width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                 (outs VR:$vd),
-                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;
+                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm"> {
+  assert !and(!ge(nfields, 2), !le(nfields, 8)), "nfields must be 2-8";
+}
 
 // strided segment load vd, (rs1), rs2, vm
-class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
-    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
+class VStridedSegmentLoad<int nfields, RISCVWidth width, string opcodestr>
+    : RVInstVLS<!sub(nfields, 1), width.Value{3}, width.Value{2-0},
                 (outs VR:$vd),
                 (ins GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
-                "$vd, $rs1, $rs2$vm">;
+                "$vd, $rs1, $rs2$vm"> {
+  assert !and(!ge(nfields, 2), !le(nfields, 8)), "nfields must be 2-8";
+}
 
 // indexed segment load vd, (rs1), vs2, vm
-class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
+class VIndexedSegmentLoad<int nfields, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
-    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
+    : RVInstVLX<!sub(nfields, 1), width.Value{3}, mop, width.Value{2-0},
                 (outs VR:$vd),
                 (ins GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
-                "$vd, $rs1, $vs2$vm">;
+                "$vd, $rs1, $vs2$vm"> {
+  assert !and(!ge(nfields, 2), !le(nfields, 8)), "nfields must be 2-8";
+}
 } // hasSideEffects = 0, mayLoad = 1, mayStore = 0
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
@@ -420,11 +426,13 @@ class VUnitStrideStore<RISCVWidth width, string opcodestr>
                 "$vs3, ${rs1}$vm">;
 
 let vm = 1 in {
-// vs<nf>r.v vd, (rs1)
-class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
-    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
+// vs<lmul>r.v vd, (rs1)
+class VWholeStore<int lmul, string opcodestr, RegisterClass VRC>
+    : RVInstVSU<!sub(lmul, 1), 0, SUMOPUnitStrideWholeReg,
                 0b000, (outs), (ins VRC:$vs3, GPRMemZeroOffset:$rs1),
                 opcodestr, "$vs3, $rs1"> {
+  assert !and(!ge(lmul, 1), !le(lmul, 8)), "lmul must be 1-8";
+
   let Uses = [];
 }
 
@@ -448,23 +456,29 @@ class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
                 opcodestr, "$vs3, $rs1, $vs2$vm">;
 
 // segment store vd, vs3, (rs1), vm
-class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
-    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
+class VUnitStrideSegmentStore<int nfields, RISCVWidth width, string opcodestr>
+    : RVInstVSU<!sub(nfields, 1), width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                 (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr,
-                "$vs3, ${rs1}$vm">;
+                "$vs3, ${rs1}$vm"> {
+  assert !and(!ge(nfields, 2), !le(nfields, 8)), "nfields must be 2-8";
+}
 
 // segment store vd, vs3, (rs1), rs2, vm
-class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
-    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
+class VStridedSegmentStore<int nfields, RISCVWidth width, string opcodestr>
+    : RVInstVSS<!sub(nfields, 1), width.Value{3}, width.Value{2-0}, (outs),
                 (ins VR:$vs3, GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm),
-                opcodestr, "$vs3, $rs1, $rs2$vm">;
+                opcodestr, "$vs3, $rs1, $rs2$vm"> {
+  assert !and(!ge(nfields, 2), !le(nfields, 8)), "nfields must be 2-8";
+}
 
 // segment store vd, vs3, (rs1), vs2, vm
-class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
+class VIndexedSegmentStore<int nfields, RISCVMOP mop, RISCVWidth width,
                            string opcodestr>
-    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
+    : RVInstVSX<!sub(nfields, 1), width.Value{3}, mop, width.Value{2-0}, (outs),
                 (ins VR:$vs3, GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm),
-                opcodestr, "$vs3, $rs1, $vs2$vm">;
+                opcodestr, "$vs3, $rs1, $vs2$vm"> {
+  assert !and(!ge(nfields, 2), !le(nfields, 8)), "nfields must be 2-8";
+}
 } // hasSideEffects = 0, mayLoad = 0, mayStore = 1
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
@@ -1065,12 +1079,12 @@ multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
            SchedBinaryMC<"WriteVCompressV", "ReadVCompressV", "ReadVCompressV">;
 }
 
-multiclass VWholeLoadN<int l, bits<3> nf, string opcodestr, RegisterClass VRC> {
-  defvar w = !cast<RISCVWidth>("LSWidth" # l);
-  defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R");
+multiclass VWholeLoadN<int eew, int lmul, string opcodestr, RegisterClass VRC> {
+  defvar w = !cast<RISCVWidth>("LSWidth" # eew);
+  defvar s = !cast<SchedWrite>("WriteVLD" # lmul # "R");
 
-  def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
-                   Sched<[s, ReadVLDX]>;
+  def E # eew # _V : VWholeLoad<lmul, w, opcodestr # "e" # eew # ".v", VRC>,
+                     Sched<[s, ReadVLDX]>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -1108,10 +1122,10 @@ foreach eew = [8, 16, 32, 64] in {
     def VLSE#eew#_V  : VStridedLoad<w,  "vlse"#eew#".v">, VLSSchedMC<eew>;
     def VSSE#eew#_V  : VStridedStore<w,  "vsse"#eew#".v">, VSSSchedMC<eew>;
 
-    defm VL1R : VWholeLoadN<eew, 0, "vl1r", VR>;
-    defm VL2R : VWholeLoadN<eew, 1, "vl2r", VRM2>;
-    defm VL4R : VWholeLoadN<eew, 3, "vl4r", VRM4>;
-    defm VL8R : VWholeLoadN<eew, 7, "vl8r", VRM8>;
+    defm VL1R : VWholeLoadN<eew, 1, "vl1r", VR>;
+    defm VL2R : VWholeLoadN<eew, 2, "vl2r", VRM2>;
+    defm VL4R : VWholeLoadN<eew, 4, "vl4r", VRM4>;
+    defm VL8R : VWholeLoadN<eew, 8, "vl8r", VRM8>;
   }
 
   let Predicates = !if(!eq(eew, 64), [IsRV64, HasVInstructionsI64],
@@ -1127,13 +1141,13 @@ def VSM_V : VUnitStrideStoreMask<"vsm.v">,
 def : MnemonicAlias<"vle1.v", "vlm.v">;
 def : MnemonicAlias<"vse1.v", "vsm.v">;
 
-def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
+def VS1R_V : VWholeStore<1, "vs1r.v", VR>,
              Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>;
-def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
+def VS2R_V : VWholeStore<2, "vs2r.v", VRM2>,
              Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>;
-def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
+def VS4R_V : VWholeStore<4, "vs4r.v", VRM4>,
              Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>;
-def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
+def VS8R_V : VWholeStore<8, "vs8r.v", VRM8>,
              Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>;
 
 def : InstAlias<"vl1r.v $vd, $rs1", (VL1RE8_V VR:$vd,   GPRMemZeroOffset:$rs1)>;
@@ -1773,93 +1787,93 @@ foreach n = [1, 2, 4, 8] in {
 } // Predicates = [HasVInstructions]
 
 let Predicates = [HasVInstructions] in {
-  foreach nf=2-8 in {
+  foreach nfields=2-8 in {
     foreach eew = [8, 16, 32] in {
       defvar w = !cast<RISCVWidth>("LSWidth"#eew);
 
-      def VLSEG#nf#E#eew#_V :
-        VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
-        VLSEGSchedMC<nf, eew>;
-      def VLSEG#nf#E#eew#FF_V :
-        VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
-        VLSEGFFSchedMC<nf, eew>;
-      def VSSEG#nf#E#eew#_V :
-        VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
-        VSSEGSchedMC<nf, eew>;
+      def VLSEG#nfields#E#eew#_V :
+        VUnitStrideSegmentLoad<nfields, w, "vlseg"#nfields#"e"#eew#".v">,
+        VLSEGSchedMC<nfields, eew>;
+      def VLSEG#nfields#E#eew#FF_V :
+        VUnitStrideSegmentLoadFF<nfields, w, "vlseg"#nfields#"e"#eew#"ff.v">,
+        VLSEGFFSchedMC<nfields, eew>;
+      def VSSEG#nfields#E#eew#_V :
+        VUnitStrideSegmentStore<nfields, w, "vsseg"#nfields#"e"#eew#".v">,
+        VSSEGSchedMC<nfields, eew>;
       // Vector Strided Instructions
-      def VLSSEG#nf#E#eew#_V :
-        VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
-        VLSSEGSchedMC<nf, eew>;
-      def VSSSEG#nf#E#eew#_V :
-        VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
-        VSSSEGSchedMC<nf, eew>;
+      def VLSSEG#nfields#E#eew#_V :
+        VStridedSegmentLoad<nfields, w, "vlsseg"#nfields#"e"#eew#".v">,
+        VLSSEGSchedMC<nfields, eew>;
+      def VSSSEG#nfields#E#eew#_V :
+        VStridedSegmentStore<nfields, w, "vssseg"#nfields#"e"#eew#".v">,
+        VSSSEGSchedMC<nfields, eew>;
 
       // Vector Indexed Instructions
-      def VLUXSEG#nf#EI#eew#_V :
-        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
-                            "vluxseg"#nf#"ei"#eew#".v">,
-        RISCVVXMemOpMC<!logtwo(eew), Ordered=false, Store=false, N=nf>,
-        VLXSEGSchedMC<nf, eew, isOrdered=0>;
-      def VLOXSEG#nf#EI#eew#_V :
-        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
-                            "vloxseg"#nf#"ei"#eew#".v">,
-        RISCVVXMemOpMC<!logtwo(eew), Ordered=true, Store=false, N=nf>,
-        VLXSEGSchedMC<nf, eew, isOrdered=1>;
-      def VSUXSEG#nf#EI#eew#_V :
-        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
-                             "vsuxseg"#nf#"ei"#eew#".v">,
-        RISCVVXMemOpMC<!logtwo(eew), Ordered=false, Store=true, N=nf>,
-        VSXSEGSchedMC<nf, eew, isOrdered=0>;
-      def VSOXSEG#nf#EI#eew#_V :
-        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
-                             "vsoxseg"#nf#"ei"#eew#".v">,
-        RISCVVXMemOpMC<!logtwo(eew), Ordered=true, Store=true, N=nf>,
-        VSXSEGSchedMC<nf, eew, isOrdered=1>;
+      def VLUXSEG#nfields#EI#eew#_V :
+        VIndexedSegmentLoad<nfields, MOPLDIndexedUnord, w,
+                            "vluxseg"#nfields#"ei"#eew#".v">,
+        RISCVVXMemOpMC<!logtwo(eew), Ordered=false, Store=false, N=nfields>,
+        VLXSEGSchedMC<nfields, eew, isOrdered=0>;
+      def VLOXSEG#nfields#EI#eew#_V :
+        VIndexedSegmentLoad<nfields, MOPLDIndexedOrder, w,
+                            "vloxseg"#nfields#"ei"#eew#".v">,
+        RISCVVXMemOpMC<!logtwo(eew), Ordered=true, Store=false, N=nfields>,
+        VLXSEGSchedMC<nfields, eew, isOrdered=1>;
+      def VSUXSEG#nfields#EI#eew#_V :
+        VIndexedSegmentStore<nfields, MOPSTIndexedUnord, w,
+                             "vsuxseg"#nfields#"ei"#eew#".v">,
+        RISCVVXMemOpMC<!logtwo(eew), Ordered=false, Store=true, N=nfiel...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/179369


More information about the llvm-commits mailing list