[llvm] 7b36502 - [RISCV][CodeGen] Account for LMUL for Vector Integer load store instructions

Michael Maitland via llvm-commits <llvm-commits at lists.llvm.org>
Tue Dec 6 16:58:03 PST 2022


Author: Michael Maitland
Date: 2022-12-06T16:57:35-08:00
New Revision: 7b3650285442f0c1cf7f7417706c0c66f9b5262c

URL: https://github.com/llvm/llvm-project/commit/7b3650285442f0c1cf7f7417706c0c66f9b5262c
DIFF: https://github.com/llvm/llvm-project/commit/7b3650285442f0c1cf7f7417706c0c66f9b5262c.diff

LOG: [RISCV][CodeGen] Account for LMUL for Vector Integer load store instructions

It is likely that subtargets act differently for vector load and store instructions based on the LMUL.
This patch creates separate SchedRead, SchedWrite, WriteRes, and ReadAdvance definitions for each relevant LMUL.

Differential Revision: https://reviews.llvm.org/D137429
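
Note: the hunks below use LMULSchedWrites, LMULSchedReads, LMULWriteRes and
LMULReadAdvance, whose definitions are not part of this diff. A minimal
sketch of what these multiclasses plausibly look like, assuming each one
expands its argument once per LMUL in SchedMxList:

  // Sketch only: the real definitions live in RISCVScheduleV.td and are
  // not shown in this diff. SchedMxList is the LMUL list defined there.
  multiclass LMULSchedWrites<string name> {
    foreach mx = SchedMxList in
      def name # "_" # mx : SchedWrite;
  }
  multiclass LMULSchedReads<string name> {
    foreach mx = SchedMxList in
      def name # "_" # mx : SchedRead;
  }
  multiclass LMULWriteRes<string name, list<ProcResourceKind> resources> {
    foreach mx = SchedMxList in
      def : WriteRes<!cast<SchedWrite>(name # "_" # mx), resources>;
  }
  multiclass LMULReadAdvance<string name, int val> {
    foreach mx = SchedMxList in
      def : ReadAdvance<!cast<SchedRead>(name # "_" # mx), val>;
  }

Under that assumption, LMULSchedWrites<"WriteVLDE"> would define
WriteVLDE_UpperBound, WriteVLDE_M1, WriteVLDE_M2, WriteVLDE_M4,
WriteVLDE_M8, WriteVLDE_MF2, WriteVLDE_MF4 and WriteVLDE_MF8, which is
where names such as WriteVLDE_UpperBound in the hunks below come from.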

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVScheduleV.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 44af53bdaaf5..eec697361e3b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -98,54 +98,91 @@ def simm5_plus1_nonzero : ImmLeaf<XLenVT,
 // Scheduling definitions.
 //===----------------------------------------------------------------------===//
 
-class VMVRSched<int n>: Sched <[!cast<SchedReadWrite>("WriteVMov" # n # "V"),
-                                !cast<SchedReadWrite>("ReadVMov" # n # "V")]>;
-
-class VLESched : Sched <[WriteVLDE, ReadVLDX, ReadVMask]>;
-
-class VSESched : Sched <[WriteVSTE, ReadVSTEV, ReadVSTX, ReadVMask]>;
-
-class VLSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDS" # n),
-                                ReadVLDX, ReadVLDSX, ReadVMask]>;
-
-class VSSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVSTS" # n),
-                                !cast<SchedReadWrite>("ReadVSTS" # n # "V"),
-                                ReadVSTX, ReadVSTSX, ReadVMask]>;
-
-class VLXSched<int n, string o> :
-  Sched <[!cast<SchedReadWrite>("WriteVLD" # o # "X" # n),
-          ReadVLDX, !cast<SchedReadWrite>("ReadVLD" # o # "XV"), ReadVMask]>;
-
-class VSXSched<int n, string o> :
-  Sched <[!cast<SchedReadWrite>("WriteVST" # o # "X" # n),
-          !cast<SchedReadWrite>("ReadVST" # o # "X" # n),
-          ReadVSTX, !cast<SchedReadWrite>("ReadVST" # o # "XV"), ReadVMask]>;
-
-class VLFSched : Sched <[WriteVLDFF, ReadVLDX, ReadVMask]>;
+class VMVRSched<int n> : Sched<[
+  !cast<SchedReadWrite>("WriteVMov" #n #"V"),
+  !cast<SchedReadWrite>("ReadVMov" #n #"V")
+]>;
+
+class VLESched<string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVLDE_" #mx),
+  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
+]>;
+
+class VSESched<string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVSTE_" #mx),
+  !cast<SchedReadWrite>("ReadVSTEV_" #mx),
+  !cast<SchedReadWrite>("ReadVSTX_" #mx), ReadVMask
+]>;
+
+class VLSSched<int n, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVLDS" #n #"_" #mx),
+  !cast<SchedReadWrite>("ReadVLDX_" #mx),
+  !cast<SchedReadWrite>("ReadVLDSX_" #mx), ReadVMask
+]>;
+
+class VSSSched<int n, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVSTS" #n #"_" #mx),
+  !cast<SchedReadWrite>("ReadVSTS" #n #"V_" #mx),
+  !cast<SchedReadWrite>("ReadVSTX_" #mx),
+  !cast<SchedReadWrite>("ReadVSTSX_" #mx), ReadVMask
+]>;
+
+class VLXSched<int n, string o, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVLD" #o #"X" #n #"_" #mx),
+  !cast<SchedReadWrite>("ReadVLDX_" #mx),
+  !cast<SchedReadWrite>("ReadVLD" #o #"XV_" #mx), ReadVMask
+]>;
+
+class VSXSched<int n, string o, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVST" #o #"X" #n #"_" #mx),
+  !cast<SchedReadWrite>("ReadVST" #o #"X" #n #"_" #mx),
+  !cast<SchedReadWrite>("ReadVSTX_" #mx),
+  !cast<SchedReadWrite>("ReadVST" #o #"XV_" #mx), ReadVMask
+]>;
+
+class VLFSched<string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVLDFF_" #mx),
+  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
+]>;
 
 // Unit-Stride Segment Loads and Stores
-class VLSEGSched<int nf, int eew> : Sched<[
-  !cast<SchedReadWrite>("WriteVLSEG" #nf #"e" #eew), ReadVLDX, ReadVMask]>;
-class VSSEGSched<int nf, int eew> : Sched<[
-  !cast<SchedReadWrite>("WriteVSSEG" #nf #"e" #eew), ReadVSTEV, ReadVSTX,
-  ReadVMask]>;
-class VLSEGFFSched<int nf, int eew> : Sched<[
-  !cast<SchedReadWrite>("WriteVLSEGFF" #nf #"e" #eew), ReadVLDX, ReadVMask]>;
+class VLSEGSched<int nf, int eew, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVLSEG" #nf #"e" #eew #"_" #mx),
+  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
+]>;
+class VSSEGSched<int nf, int eew, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVSSEG" #nf #"e" #eew #"_" #mx),
+  !cast<SchedReadWrite>("ReadVSTEV_" #mx),
+  !cast<SchedReadWrite>("ReadVSTX_" #mx), ReadVMask
+]>;
+class VLSEGFFSched<int nf, int eew, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVLSEGFF" #nf #"e" #eew #"_" #mx),
+  !cast<SchedReadWrite>("ReadVLDX_" #mx), ReadVMask
+]>;
 // Strided Segment Loads and Stores
-class VLSSEGSched<int nf, int eew> : Sched<[
-  !cast<SchedReadWrite>("WriteVLSSEG" #nf #"e" #eew), ReadVLDX, ReadVLDSX,
-  ReadVMask]>;
-class VSSSEGSched<int nf, int eew> : Sched<[
-  !cast<SchedReadWrite>("WriteVSSSEG" #nf #"e" #eew),
-  !cast<SchedReadWrite>("ReadVSTS" #eew #"V"), ReadVSTX, ReadVSTSX, ReadVMask]>;
+class VLSSEGSched<int nf, int eew, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVLSSEG" #nf #"e" #eew #"_" #mx),
+  !cast<SchedReadWrite>("ReadVLDX_" #mx),
+  !cast<SchedReadWrite>("ReadVLDSX_" #mx), ReadVMask
+]>;
+class VSSSEGSched<int nf, int eew, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVSSSEG" #nf #"e" #eew #"_" #mx),
+  !cast<SchedReadWrite>("ReadVSTS" #eew #"V" #"_" #mx),
+  !cast<SchedReadWrite>("ReadVSTX_" #mx),
+  !cast<SchedReadWrite>("ReadVSTSX_" #mx), ReadVMask
+]>;
 // Indexed Segment Loads and Stores
-class VLXSEGSched<int nf, int eew, string o> : Sched<[
-  !cast<SchedReadWrite>("WriteVL" #o # "XSEG" #nf #"e" #eew), ReadVLDX,
-  !cast<SchedReadWrite>("ReadVLD" # o # "XV"), ReadVMask]>;
-class VSXSEGSched<int nf, int eew, string o> : Sched<[
-  !cast<SchedReadWrite>("WriteVS" #o # "XSEG" #nf #"e" #eew),
-  !cast<SchedReadWrite>("ReadVST" #o # "X" #eew), ReadVSTX,
-  !cast<SchedReadWrite>("ReadVST" #o # "XV"), ReadVMask]>;
+class VLXSEGSched<int nf, int eew, string o, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVL" #o #"XSEG" #nf #"e" #eew #"_" #mx),
+  !cast<SchedReadWrite>("ReadVLDX_" #mx),
+  !cast<SchedReadWrite>("ReadVLD" #o #"XV" #"_" #mx), ReadVMask
+]>;
+class VSXSEGSched<int nf, int eew, string o, string mx> : Sched<[
+  !cast<SchedReadWrite>("WriteVS" #o #"XSEG" #nf #"e" #eew #"_" #mx),
+  !cast<SchedReadWrite>("ReadVST" #o #"X" #eew # "_" # mx),
+  !cast<SchedReadWrite>("ReadVSTX_" #mx),
+  !cast<SchedReadWrite>("ReadVST" #o #"XV" # "_" # mx), ReadVMask
+]>;
 
 //===----------------------------------------------------------------------===//
 // Instruction class templates
@@ -386,17 +423,17 @@ multiclass VIndexLoadStore<list<int> EEWList> {
 
     def VLUXEI # n # _V :
       VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">,
-      VLXSched<n, "U">;
+      VLXSched<n, "U", UpperBoundLMUL>;
     def VLOXEI # n # _V :
       VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">,
-      VLXSched<n, "O">;
+      VLXSched<n, "O", UpperBoundLMUL>;
 
     def VSUXEI # n # _V :
       VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">,
-      VSXSched<n, "U">;
+      VSXSched<n, "U", UpperBoundLMUL>;
     def VSOXEI # n # _V :
       VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">,
-      VSXSched<n, "O">;
+      VSXSched<n, "O", UpperBoundLMUL>;
   }
 }
 
@@ -921,12 +958,12 @@ multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
     defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R");
 
     def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
-                     Sched<[s, ReadVLDX]>;
+                     Sched<[s, ReadVLDX_UpperBound]>;
   }
 }
 multiclass VWholeLoadEEW64<bits<3> nf, string opcodestr, RegisterClass VRC, SchedReadWrite schedrw> {
   def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>,
-              Sched<[schedrw, ReadVLDX]>;
+              Sched<[schedrw, ReadVLDX_UpperBound]>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -950,15 +987,15 @@ foreach eew = [8, 16, 32] in {
   defvar w = !cast<RISCVWidth>("LSWidth" # eew);
 
   // Vector Unit-Stride Instructions
-  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched;
-  def VSE#eew#_V  : VUnitStrideStore<w,  "vse"#eew#".v">, VSESched;
+  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched<UpperBoundLMUL>;
+  def VSE#eew#_V  : VUnitStrideStore<w,  "vse"#eew#".v">, VSESched<UpperBoundLMUL>;
 
   // Vector Unit-Stride Fault-only-First Loads
-  def VLE#eew#FF_V : VUnitStrideLoadFF<w,  "vle"#eew#"ff.v">, VLFSched;
+  def VLE#eew#FF_V : VUnitStrideLoadFF<w,  "vle"#eew#"ff.v">, VLFSched<UpperBoundLMUL>;
 
   // Vector Strided Instructions
-  def VLSE#eew#_V  : VStridedLoad<w,  "vlse"#eew#".v">, VLSSched<eew>;
-  def VSSE#eew#_V  : VStridedStore<w,  "vsse"#eew#".v">, VSSSched<eew>;
+  def VLSE#eew#_V  : VStridedLoad<w,  "vlse"#eew#".v">, VLSSched<eew, UpperBoundLMUL>;
+  def VSSE#eew#_V  : VStridedStore<w,  "vsse"#eew#".v">, VSSSched<eew, UpperBoundLMUL>;
 }
 
 defm "" : VIndexLoadStore<[8, 16, 32]>;
@@ -966,9 +1003,9 @@ defm "" : VIndexLoadStore<[8, 16, 32]>;
 
 let Predicates = [HasVInstructions] in {
 def VLM_V : VUnitStrideLoadMask<"vlm.v">,
-             Sched<[WriteVLDM, ReadVLDX]>;
+             Sched<[WriteVLDM_UpperBound, ReadVLDX_UpperBound]>;
 def VSM_V : VUnitStrideStoreMask<"vsm.v">,
-             Sched<[WriteVSTM, ReadVSTM, ReadVSTX]>;
+             Sched<[WriteVSTM_UpperBound, ReadVSTM_UpperBound, ReadVSTX_UpperBound]>;
 def : InstAlias<"vle1.v $vd, (${rs1})",
                 (VLM_V VR:$vd, GPR:$rs1), 0>;
 def : InstAlias<"vse1.v $vs3, (${rs1})",
@@ -980,13 +1017,13 @@ defm VL4R : VWholeLoadN<3, "vl4r", VRM4>;
 defm VL8R : VWholeLoadN<7, "vl8r", VRM8>;
 
 def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
-             Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>;
+             Sched<[WriteVST1R, ReadVST1R, ReadVSTX_UpperBound]>;
 def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
-             Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>;
+             Sched<[WriteVST2R, ReadVST2R, ReadVSTX_UpperBound]>;
 def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
-             Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>;
+             Sched<[WriteVST4R, ReadVST4R, ReadVSTX_UpperBound]>;
 def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
-             Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>;
+             Sched<[WriteVST8R, ReadVST8R, ReadVSTX_UpperBound]>;
 
 def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
 def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
@@ -997,19 +1034,19 @@ def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
 let Predicates = [HasVInstructionsI64] in {
 // Vector Unit-Stride Instructions
 def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
-              VLESched;
+              VLESched<UpperBoundLMUL>;
 
 def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">,
-                VLFSched;
+                VLFSched<UpperBoundLMUL>;
 
 def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
-              VSESched;
+              VSESched<UpperBoundLMUL>;
 // Vector Strided Instructions
 def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">,
-               VLSSched<32>;
+               VLSSched<32, UpperBoundLMUL>;
 
 def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">,
-               VSSSched<64>;
+               VSSSched<64, UpperBoundLMUL>;
 
 defm VL1R: VWholeLoadEEW64<0, "vl1r", VR, WriteVLD1R>;
 defm VL2R: VWholeLoadEEW64<1, "vl2r", VRM2, WriteVLD2R>;
@@ -1650,38 +1687,38 @@ let Predicates = [HasVInstructions] in {
 
       def VLSEG#nf#E#eew#_V :
         VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
-        VLSEGSched<nf, eew>;
+        VLSEGSched<nf, eew, UpperBoundLMUL>;
       def VLSEG#nf#E#eew#FF_V :
         VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
-        VLSEGFFSched<nf, eew>;
+        VLSEGFFSched<nf, eew, UpperBoundLMUL>;
       def VSSEG#nf#E#eew#_V :
         VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
-        VSSEGSched<nf, eew>;
+        VSSEGSched<nf, eew, UpperBoundLMUL>;
       // Vector Strided Instructions
       def VLSSEG#nf#E#eew#_V :
         VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
-        VLSSEGSched<nf, eew>;
+        VLSSEGSched<nf, eew, UpperBoundLMUL>;
       def VSSSEG#nf#E#eew#_V :
         VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
-        VSSSEGSched<nf, eew>;
+        VSSSEGSched<nf, eew, UpperBoundLMUL>;
 
       // Vector Indexed Instructions
       def VLUXSEG#nf#EI#eew#_V :
         VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
                             "vluxseg"#nf#"ei"#eew#".v">,
-        VLXSEGSched<nf, eew, "U">;
+        VLXSEGSched<nf, eew, "U", UpperBoundLMUL>;
       def VLOXSEG#nf#EI#eew#_V :
         VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
                             "vloxseg"#nf#"ei"#eew#".v">,
-        VLXSEGSched<nf, eew, "O">;
+        VLXSEGSched<nf, eew, "O", UpperBoundLMUL>;
       def VSUXSEG#nf#EI#eew#_V :
         VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
                              "vsuxseg"#nf#"ei"#eew#".v">,
-        VSXSEGSched<nf, eew, "U">;
+        VSXSEGSched<nf, eew, "U", UpperBoundLMUL>;
       def VSOXSEG#nf#EI#eew#_V :
         VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
                              "vsoxseg"#nf#"ei"#eew#".v">,
-        VSXSEGSched<nf, eew, "O">;
+        VSXSEGSched<nf, eew, "O", UpperBoundLMUL>;
     }
   }
 } // Predicates = [HasVInstructions]
@@ -1691,38 +1728,42 @@ let Predicates = [HasVInstructionsI64] in {
     // Vector Unit-strided Segment Instructions
     def VLSEG#nf#E64_V :
       VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
-      VLSEGSched<nf, 64>;
+      VLSEGSched<nf, 64, UpperBoundLMUL>;
     def VLSEG#nf#E64FF_V :
       VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
-      VLSEGFFSched<nf, 64>;
+      VLSEGFFSched<nf, 64, UpperBoundLMUL>;
     def VSSEG#nf#E64_V :
       VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
-      VSSEGSched<nf, 64>;
+      VSSEGSched<nf, 64, UpperBoundLMUL>;
 
     // Vector Strided Segment Instructions
     def VLSSEG#nf#E64_V :
       VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
-      VLSSEGSched<nf, 64>;
+      VLSSEGSched<nf, 64, UpperBoundLMUL>;
     def VSSSEG#nf#E64_V :
       VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
-      VSSSEGSched<nf, 64>;
+      VSSSEGSched<nf, 64, UpperBoundLMUL>;
   }
 } // Predicates = [HasVInstructionsI64]
 let Predicates = [HasVInstructionsI64, IsRV64] in {
-  foreach nf=2-8 in {
+  foreach nf = 2 - 8 in {
     // Vector Indexed Segment Instructions
-    def VLUXSEG#nf#EI64_V :
-      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
-                          "vluxseg"#nf#"ei64.v">, VLXSEGSched<nf, 64, "U">;
-    def VLOXSEG#nf#EI64_V :
-      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
-                          "vloxseg"#nf#"ei64.v">, VLXSEGSched<nf, 64, "O">;
-    def VSUXSEG#nf#EI64_V :
-      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
-                           "vsuxseg"#nf#"ei64.v">, VSXSEGSched<nf, 64, "U">;
-    def VSOXSEG#nf#EI64_V :
-      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
-                           "vsoxseg"#nf#"ei64.v">, VSXSEGSched<nf, 64, "O">;
+    def VLUXSEG #nf #EI64_V
+        : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
+                              "vluxseg" #nf #"ei64.v">,
+          VLXSEGSched<nf, 64, "U", UpperBoundLMUL>;
+    def VLOXSEG #nf #EI64_V
+        : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
+                              "vloxseg" #nf #"ei64.v">,
+          VLXSEGSched<nf, 64, "O", UpperBoundLMUL>;
+    def VSUXSEG #nf #EI64_V
+        : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
+                               "vsuxseg" #nf #"ei64.v">,
+          VSXSEGSched<nf, 64, "U", UpperBoundLMUL>;
+    def VSOXSEG #nf #EI64_V
+        : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
+                               "vsoxseg" #nf #"ei64.v">,
+          VSXSEGSched<nf, 64, "O", UpperBoundLMUL>;
   }
 } // Predicates = [HasVInstructionsI64, IsRV64]
 

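To make the new mx parameter concrete: the pseudos (next file) pass their
actual LMUL, while the assembler instructions in this file conservatively
pass UpperBoundLMUL. A hypothetical expansion, where SomeVLE8 is an
invented name and not a def from this patch:

  // Hypothetical def showing what VLESched<"M1"> attaches:
  def SomeVLE8 : VUnitStrideLoad<LSWidth8, "vle8.v">, VLESched<"M1">;
  // ...is equivalent, per the VLESched class above, to:
  // def SomeVLE8 : VUnitStrideLoad<LSWidth8, "vle8.v">,
  //                Sched<[WriteVLDE_M1, ReadVLDX_M1, ReadVMask]>;
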
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 1713f607a2e7..135d0962562f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1632,14 +1632,14 @@ multiclass VPseudoUSLoad {
       let VLMul = lmul.value in {
         def "E" # eew # "_V_" # LInfo :
           VPseudoUSLoadNoMask<vreg, eew>,
-          VLESched;
+          VLESched<LInfo>;
         def "E" # eew # "_V_" # LInfo # "_TU":
           VPseudoUSLoadNoMaskTU<vreg, eew>,
-          VLESched;
+          VLESched<LInfo>;
         def "E" # eew # "_V_" # LInfo # "_MASK" :
           VPseudoUSLoadMask<vreg, eew>,
           RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
-          VLESched;
+          VLESched<LInfo>;
       }
     }
   }
@@ -1653,14 +1653,14 @@ multiclass VPseudoFFLoad {
       let VLMul = lmul.value in {
         def "E" # eew # "FF_V_" # LInfo:
           VPseudoUSLoadFFNoMask<vreg, eew>,
-          VLFSched;
+          VLFSched<LInfo>;
         def "E" # eew # "FF_V_" # LInfo # "_TU":
           VPseudoUSLoadFFNoMaskTU<vreg, eew>,
-          VLFSched;
+          VLFSched<LInfo>;
         def "E" # eew # "FF_V_" # LInfo # "_MASK":
           VPseudoUSLoadFFMask<vreg, eew>,
           RISCVMaskedPseudo</*MaskOpIdx*/ 2>,
-          VLFSched;
+          VLFSched<LInfo>;
       }
     }
   }
@@ -1668,8 +1668,12 @@ multiclass VPseudoFFLoad {
 
 multiclass VPseudoLoadMask {
   foreach mti = AllMasks in {
+    defvar mx = mti.LMul.MX;
+    defvar WriteVLDM_MX = !cast<SchedWrite>("WriteVLDM_" # mx);
+    defvar ReadVLDX_MX = !cast<SchedRead>("ReadVLDX_" # mx);
     let VLMul = mti.LMul.value in {
-      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
+      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*DummyMask*/0>,
+        Sched<[WriteVLDM_MX, ReadVLDX_MX]>;
     }
   }
 }
@@ -1681,13 +1685,13 @@ multiclass VPseudoSLoad {
       defvar vreg = lmul.vrclass;
       let VLMul = lmul.value in {
         def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>,
-                                        VLSSched<eew>;
+                                        VLSSched<eew, LInfo>;
         def "E" # eew # "_V_" # LInfo # "_TU": VPseudoSLoadNoMaskTU<vreg, eew>,
-                                        VLSSched<eew>;
+                                        VLSSched<eew, LInfo>;
         def "E" # eew # "_V_" # LInfo # "_MASK" :
           VPseudoSLoadMask<vreg, eew>,
           RISCVMaskedPseudo</*MaskOpIdx*/ 3>,
-          VLSSched<eew>;
+          VLSSched<eew, LInfo>;
       }
     }
   }
@@ -1711,14 +1715,14 @@ multiclass VPseudoILoad<bit Ordered> {
           let VLMul = lmul.value in {
             def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
               VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
-              VLXSched<eew, Order>;
+              VLXSched<eew, Order, LInfo>;
             def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_TU":
               VPseudoILoadNoMaskTU<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
-              VLXSched<eew, Order>;
+              VLXSched<eew, Order, LInfo>;
             def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
               VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>,
               RISCVMaskedPseudo</*MaskOpIdx*/ 3>,
-              VLXSched<eew, Order>;
+              VLXSched<eew, Order, LInfo>;
           }
         }
       }
@@ -1733,9 +1737,9 @@ multiclass VPseudoUSStore {
       defvar vreg = lmul.vrclass;
       let VLMul = lmul.value in {
         def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>,
-                                        VSESched;
+                                        VSESched<LInfo>;
         def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>,
-                                                  VSESched;
+                                                  VSESched<LInfo>;
       }
     }
   }
@@ -1743,8 +1747,12 @@ multiclass VPseudoUSStore {
 
 multiclass VPseudoStoreMask {
   foreach mti = AllMasks in {
+    defvar mx = mti.LMul.MX;
+    defvar WriteVSTM_MX = !cast<SchedWrite>("WriteVSTM_" # mx);
+    defvar ReadVSTX_MX = !cast<SchedRead>("ReadVSTX_" # mx);
     let VLMul = mti.LMul.value in {
-      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
+      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1, /*DummyMask*/0>,
+        Sched<[WriteVSTM_MX, ReadVSTX_MX]>;
     }
   }
 }
@@ -1756,9 +1764,9 @@ multiclass VPseudoSStore {
       defvar vreg = lmul.vrclass;
       let VLMul = lmul.value in {
         def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>,
-                                        VSSSched<eew>;
+                                        VSSSched<eew, LInfo>;
         def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>,
-                                                  VSSSched<eew>;
+                                                  VSSSched<eew, LInfo>;
       }
     }
   }
@@ -1781,10 +1789,10 @@ multiclass VPseudoIStore<bit Ordered> {
           let VLMul = lmul.value in {
             def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
               VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
-              VSXSched<eew, Order>;
+              VSXSched<eew, Order, LInfo>;
             def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
               VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>,
-              VSXSched<eew, Order>;
+              VSXSched<eew, Order, LInfo>;
           }
         }
       }
@@ -3553,11 +3561,11 @@ multiclass VPseudoUSSegLoad {
         foreach nf = NFSet<lmul>.L in {
           defvar vreg = SegRegClass<lmul, nf>.RC;
           def nf # "E" # eew # "_V_" # LInfo :
-            VPseudoUSSegLoadNoMask<vreg, eew, nf>, VLSEGSched<nf, eew>;
+            VPseudoUSSegLoadNoMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
           def nf # "E" # eew # "_V_" # LInfo # "_TU" :
-            VPseudoUSSegLoadNoMaskTU<vreg, eew, nf>, VLSEGSched<nf, eew>;
+            VPseudoUSSegLoadNoMaskTU<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
           def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
-            VPseudoUSSegLoadMask<vreg, eew, nf>, VLSEGSched<nf, eew>;
+            VPseudoUSSegLoadMask<vreg, eew, nf>, VLSEGSched<nf, eew, LInfo>;
         }
       }
     }
@@ -3572,11 +3580,11 @@ multiclass VPseudoUSSegLoadFF {
         foreach nf = NFSet<lmul>.L in {
           defvar vreg = SegRegClass<lmul, nf>.RC;
           def nf # "E" # eew # "FF_V_" # LInfo :
-            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>, VLSEGFFSched<nf, eew>;
+            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
           def nf # "E" # eew # "FF_V_" # LInfo # "_TU" :
-            VPseudoUSSegLoadFFNoMaskTU<vreg, eew, nf>, VLSEGFFSched<nf, eew>;
+            VPseudoUSSegLoadFFNoMaskTU<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
           def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
-            VPseudoUSSegLoadFFMask<vreg, eew, nf>, VLSEGFFSched<nf, eew>;
+            VPseudoUSSegLoadFFMask<vreg, eew, nf>, VLSEGFFSched<nf, eew, LInfo>;
         }
       }
     }
@@ -3591,11 +3599,11 @@ multiclass VPseudoSSegLoad {
         foreach nf = NFSet<lmul>.L in {
           defvar vreg = SegRegClass<lmul, nf>.RC;
           def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>,
-                                               VLSSEGSched<nf, eew>;
+                                               VLSSEGSched<nf, eew, LInfo>;
           def nf # "E" # eew # "_V_" # LInfo # "_TU" : VPseudoSSegLoadNoMaskTU<vreg, eew, nf>,
-                                                       VLSSEGSched<nf, eew>;
+                                                       VLSSEGSched<nf, eew, LInfo>;
           def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>,
-                                                         VLSSEGSched<nf, eew>;
+                                                         VLSSEGSched<nf, eew, LInfo>;
         }
       }
     }
@@ -3622,15 +3630,15 @@ multiclass VPseudoISegLoad<bit Ordered> {
               def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                 VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                       nf, Ordered>,
-                VLXSEGSched<nf, idx_eew, Order>;
+                VLXSEGSched<nf, idx_eew, Order, ValLInfo>;
               def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_TU" :
                 VPseudoISegLoadNoMaskTU<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                         nf, Ordered>,
-                VLXSEGSched<nf, idx_eew, Order>;
+                VLXSEGSched<nf, idx_eew, Order, ValLInfo>;
               def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                 VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                     nf, Ordered>,
-                VLXSEGSched<nf, idx_eew, Order>;
+                VLXSEGSched<nf, idx_eew, Order, ValLInfo>;
             }
           }
         }
@@ -3647,9 +3655,9 @@ multiclass VPseudoUSSegStore {
         foreach nf = NFSet<lmul>.L in {
           defvar vreg = SegRegClass<lmul, nf>.RC;
           def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>,
-                                               VSSEGSched<nf, eew>;
+                                               VSSEGSched<nf, eew, LInfo>;
           def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>,
-                                                         VSSEGSched<nf, eew>;
+                                                         VSSEGSched<nf, eew, LInfo>;
         }
       }
     }
@@ -3664,9 +3672,9 @@ multiclass VPseudoSSegStore {
         foreach nf = NFSet<lmul>.L in {
           defvar vreg = SegRegClass<lmul, nf>.RC;
           def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>,
-                                               VSSSEGSched<nf, eew>;
+                                               VSSSEGSched<nf, eew, LInfo>;
           def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>,
-                                                         VSSSEGSched<nf, eew>;
+                                                         VSSSEGSched<nf, eew, LInfo>;
         }
       }
     }
@@ -3693,11 +3701,11 @@ multiclass VPseudoISegStore<bit Ordered> {
               def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
                 VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                        nf, Ordered>,
-                VSXSEGSched<nf, idx_eew, Order>;
+                VSXSEGSched<nf, idx_eew, Order, ValLInfo>;
               def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
                 VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
                                      nf, Ordered>,
-                VSXSEGSched<nf, idx_eew, Order>;
+                VSXSEGSched<nf, idx_eew, Order, ValLInfo>;
             }
           }
         }
@@ -5149,10 +5157,8 @@ def PseudoVSETIVLI : Pseudo<(outs GPR:$rd), (ins uimm5:$rs1, VTypeIOp10:$vtypei)
 defm PseudoVL : VPseudoUSLoad;
 defm PseudoVS : VPseudoUSStore;
 
-defm PseudoVLM : VPseudoLoadMask,
-                 Sched<[WriteVLDM, ReadVLDX]>;
-defm PseudoVSM : VPseudoStoreMask,
-                 Sched<[WriteVSTM, ReadVSTX]>;
+defm PseudoVLM : VPseudoLoadMask;
+defm PseudoVSM : VPseudoStoreMask;
 
 //===----------------------------------------------------------------------===//
 // 7.5 Vector Strided Instructions

diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td
index 06da5b3833aa..62054b0a8e6e 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleV.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td
@@ -9,6 +9,7 @@
 //===----------------------------------------------------------------------===//
 /// Define scheduler resources associated with def operands.
 
+defvar UpperBoundLMUL = "UpperBound";
 defvar SchedMxList = ["UpperBound", "M1", "M2", "M4", "M8", "MF2", "MF4", "MF8"];
 // Used for widening and narrowing instructions as it doesn't contain M8.
 defvar SchedMxListW = ["UpperBound", "MF8", "MF4", "MF2", "M1", "M2", "M4"];
@@ -114,51 +115,51 @@ def WriteVSETVL       : SchedWrite;
 
 // 7. Vector Loads and Stores
 // 7.4. Vector Unit-Stride Instructions
-def WriteVLDE         : SchedWrite;
-def WriteVSTE         : SchedWrite;
+defm "" : LMULSchedWrites<"WriteVLDE">;
+defm "" : LMULSchedWrites<"WriteVSTE">;
 // 7.4.1. Vector Unit-Strided Mask
-def WriteVLDM         : SchedWrite;
-def WriteVSTM         : SchedWrite;
+defm "" : LMULSchedWrites<"WriteVLDM">;
+defm "" : LMULSchedWrites<"WriteVSTM">;
 // 7.5. Vector Strided Instructions
-def WriteVLDS8        : SchedWrite;
-def WriteVLDS16       : SchedWrite;
-def WriteVLDS32       : SchedWrite;
-def WriteVLDS64       : SchedWrite;
-def WriteVSTS8        : SchedWrite;
-def WriteVSTS16       : SchedWrite;
-def WriteVSTS32       : SchedWrite;
-def WriteVSTS64       : SchedWrite;
+defm "" : LMULSchedWrites<"WriteVLDS8">;
+defm "" : LMULSchedWrites<"WriteVLDS16">;
+defm "" : LMULSchedWrites<"WriteVLDS32">;
+defm "" : LMULSchedWrites<"WriteVLDS64">;
+defm "" : LMULSchedWrites<"WriteVSTS8">;
+defm "" : LMULSchedWrites<"WriteVSTS16">;
+defm "" : LMULSchedWrites<"WriteVSTS32">;
+defm "" : LMULSchedWrites<"WriteVSTS64">;
 // 7.6. Vector Indexed Instructions
-def WriteVLDUX8       : SchedWrite;
-def WriteVLDUX16      : SchedWrite;
-def WriteVLDUX32      : SchedWrite;
-def WriteVLDUX64      : SchedWrite;
-def WriteVLDOX8       : SchedWrite;
-def WriteVLDOX16      : SchedWrite;
-def WriteVLDOX32      : SchedWrite;
-def WriteVLDOX64      : SchedWrite;
-def WriteVSTUX8       : SchedWrite;
-def WriteVSTUX16      : SchedWrite;
-def WriteVSTUX32      : SchedWrite;
-def WriteVSTUX64      : SchedWrite;
-def WriteVSTOX8       : SchedWrite;
-def WriteVSTOX16      : SchedWrite;
-def WriteVSTOX32      : SchedWrite;
-def WriteVSTOX64      : SchedWrite;
+defm "" : LMULSchedWrites<"WriteVLDUX8">;
+defm "" : LMULSchedWrites<"WriteVLDUX16">;
+defm "" : LMULSchedWrites<"WriteVLDUX32">;
+defm "" : LMULSchedWrites<"WriteVLDUX64">;
+defm "" : LMULSchedWrites<"WriteVLDOX8">;
+defm "" : LMULSchedWrites<"WriteVLDOX16">;
+defm "" : LMULSchedWrites<"WriteVLDOX32">;
+defm "" : LMULSchedWrites<"WriteVLDOX64">;
+defm "" : LMULSchedWrites<"WriteVSTUX8">;
+defm "" : LMULSchedWrites<"WriteVSTUX16">;
+defm "" : LMULSchedWrites<"WriteVSTUX32">;
+defm "" : LMULSchedWrites<"WriteVSTUX64">;
+defm "" : LMULSchedWrites<"WriteVSTOX8">;
+defm "" : LMULSchedWrites<"WriteVSTOX16">;
+defm "" : LMULSchedWrites<"WriteVSTOX32">;
+defm "" : LMULSchedWrites<"WriteVSTOX64">;
 // 7.7. Vector Unit-stride Fault-Only-First Loads
-def WriteVLDFF        : SchedWrite;
+defm "" : LMULSchedWrites<"WriteVLDFF">;
 // 7.8. Vector Segment Instructions
 foreach nf=2-8 in {
   foreach eew = [8, 16, 32, 64] in {
-    def WriteVLSEG # nf # e # eew : SchedWrite;
-    def WriteVSSEG # nf # e # eew : SchedWrite;
-    def WriteVLSEGFF # nf # e # eew : SchedWrite;
-    def WriteVLSSEG # nf # e # eew : SchedWrite;
-    def WriteVSSSEG # nf # e # eew : SchedWrite;
-    def WriteVLUXSEG # nf # e # eew : SchedWrite;
-    def WriteVLOXSEG # nf # e # eew : SchedWrite;
-    def WriteVSUXSEG # nf # e # eew : SchedWrite;
-    def WriteVSOXSEG # nf # e # eew : SchedWrite;
+    defm "" : LMULSchedWrites<"WriteVLSEG" # nf # e # eew>;
+    defm "" : LMULSchedWrites<"WriteVSSEG" # nf # e # eew>;
+    defm "" : LMULSchedWrites<"WriteVLSEGFF" # nf # e # eew>;
+    defm "" : LMULSchedWrites<"WriteVLSSEG" # nf # e # eew>;
+    defm "" : LMULSchedWrites<"WriteVSSSEG" # nf # e # eew>;
+    defm "" : LMULSchedWrites<"WriteVLUXSEG" # nf # e # eew>;
+    defm "" : LMULSchedWrites<"WriteVLOXSEG" # nf # e # eew>;
+    defm "" : LMULSchedWrites<"WriteVSUXSEG" # nf # e # eew>;
+    defm "" : LMULSchedWrites<"WriteVSOXSEG" # nf # e # eew>;
   }
 }
 // 7.9. Vector Whole Register Instructions
@@ -356,41 +357,42 @@ def ReadVSETVLI       : SchedRead;
 def ReadVSETVL        : SchedRead;
 
 // 7. Vector Loads and Stores
-def ReadVLDX          : SchedRead;
-def ReadVSTX          : SchedRead;
+defm "" : LMULSchedReads<"ReadVLDX">;
+defm "" : LMULSchedReads<"ReadVSTX">;
 // 7.4. Vector Unit-Stride Instructions
-def ReadVSTEV        : SchedRead;
+defm "" : LMULSchedReads<"ReadVSTEV">;
 // 7.4.1. Vector Unit-Strided Mask
-def ReadVSTM          : SchedRead;
+defm "" : LMULSchedReads<"ReadVSTM">;
 // 7.5. Vector Strided Instructions
-def ReadVLDSX         : SchedRead;
-def ReadVSTSX         : SchedRead;
-def ReadVSTS8V        : SchedRead;
-def ReadVSTS16V       : SchedRead;
-def ReadVSTS32V       : SchedRead;
-def ReadVSTS64V       : SchedRead;
+defm "" : LMULSchedReads<"ReadVLDSX">;
+defm "" : LMULSchedReads<"ReadVSTSX">;
+defm "" : LMULSchedReads<"ReadVSTS8V">;
+defm "" : LMULSchedReads<"ReadVSTS16V">;
+defm "" : LMULSchedReads<"ReadVSTS32V">;
+defm "" : LMULSchedReads<"ReadVSTS64V">;
 // 7.6. Vector Indexed Instructions
-def ReadVLDUXV        : SchedRead;
-def ReadVLDOXV        : SchedRead;
-def ReadVSTUX8        : SchedRead;
-def ReadVSTUX16       : SchedRead;
-def ReadVSTUX32       : SchedRead;
-def ReadVSTUX64       : SchedRead;
-def ReadVSTUXV        : SchedRead;
-def ReadVSTUX8V       : SchedRead;
-def ReadVSTUX16V      : SchedRead;
-def ReadVSTUX32V      : SchedRead;
-def ReadVSTUX64V      : SchedRead;
-def ReadVSTOX8        : SchedRead;
-def ReadVSTOX16       : SchedRead;
-def ReadVSTOX32       : SchedRead;
-def ReadVSTOX64       : SchedRead;
-def ReadVSTOXV        : SchedRead;
-def ReadVSTOX8V       : SchedRead;
-def ReadVSTOX16V      : SchedRead;
-def ReadVSTOX32V      : SchedRead;
-def ReadVSTOX64V      : SchedRead;
+defm "" : LMULSchedReads<"ReadVLDUXV">;
+defm "" : LMULSchedReads<"ReadVLDOXV">;
+defm "" : LMULSchedReads<"ReadVSTUX8">;
+defm "" : LMULSchedReads<"ReadVSTUX16">;
+defm "" : LMULSchedReads<"ReadVSTUX32">;
+defm "" : LMULSchedReads<"ReadVSTUX64">;
+defm "" : LMULSchedReads<"ReadVSTUXV">;
+defm "" : LMULSchedReads<"ReadVSTUX8V">;
+defm "" : LMULSchedReads<"ReadVSTUX16V">;
+defm "" : LMULSchedReads<"ReadVSTUX32V">;
+defm "" : LMULSchedReads<"ReadVSTUX64V">;
+defm "" : LMULSchedReads<"ReadVSTOX8">;
+defm "" : LMULSchedReads<"ReadVSTOX16">;
+defm "" : LMULSchedReads<"ReadVSTOX32">;
+defm "" : LMULSchedReads<"ReadVSTOX64">;
+defm "" : LMULSchedReads<"ReadVSTOXV">;
+defm "" : LMULSchedReads<"ReadVSTOX8V">;
+defm "" : LMULSchedReads<"ReadVSTOX16V">;
+defm "" : LMULSchedReads<"ReadVSTOX32V">;
+defm "" : LMULSchedReads<"ReadVSTOX64V">;
 // 7.9. Vector Whole Register Instructions
+// These are already LMUL aware
 def ReadVST1R         : SchedRead;
 def ReadVST2R         : SchedRead;
 def ReadVST4R         : SchedRead;
@@ -586,35 +588,36 @@ def : WriteRes<WriteVSETIVLI, []>;
 def : WriteRes<WriteVSETVL, []>;
 
 // 7. Vector Loads and Stores
-def : WriteRes<WriteVLDE, []>;
-def : WriteRes<WriteVSTE, []>;
-def : WriteRes<WriteVLDM, []>;
-def : WriteRes<WriteVSTM, []>;
-def : WriteRes<WriteVLDS8, []>;
-def : WriteRes<WriteVLDS16, []>;
-def : WriteRes<WriteVLDS32, []>;
-def : WriteRes<WriteVLDS64, []>;
-def : WriteRes<WriteVSTS8, []>;
-def : WriteRes<WriteVSTS16, []>;
-def : WriteRes<WriteVSTS32, []>;
-def : WriteRes<WriteVSTS64, []>;
-def : WriteRes<WriteVLDUX8, []>;
-def : WriteRes<WriteVLDUX16, []>;
-def : WriteRes<WriteVLDUX32, []>;
-def : WriteRes<WriteVLDUX64, []>;
-def : WriteRes<WriteVLDOX8, []>;
-def : WriteRes<WriteVLDOX16, []>;
-def : WriteRes<WriteVLDOX32, []>;
-def : WriteRes<WriteVLDOX64, []>;
-def : WriteRes<WriteVSTUX8, []>;
-def : WriteRes<WriteVSTUX16, []>;
-def : WriteRes<WriteVSTUX32, []>;
-def : WriteRes<WriteVSTUX64, []>;
-def : WriteRes<WriteVSTOX8, []>;
-def : WriteRes<WriteVSTOX16, []>;
-def : WriteRes<WriteVSTOX32, []>;
-def : WriteRes<WriteVSTOX64, []>;
-def : WriteRes<WriteVLDFF, []>;
+defm "" : LMULWriteRes<"WriteVLDE", []>;
+defm "" : LMULWriteRes<"WriteVSTE", []>;
+defm "" : LMULWriteRes<"WriteVLDM", []>;
+defm "" : LMULWriteRes<"WriteVSTM", []>;
+defm "" : LMULWriteRes<"WriteVLDS8", []>;
+defm "" : LMULWriteRes<"WriteVLDS16", []>;
+defm "" : LMULWriteRes<"WriteVLDS32", []>;
+defm "" : LMULWriteRes<"WriteVLDS64", []>;
+defm "" : LMULWriteRes<"WriteVSTS8", []>;
+defm "" : LMULWriteRes<"WriteVSTS16", []>;
+defm "" : LMULWriteRes<"WriteVSTS32", []>;
+defm "" : LMULWriteRes<"WriteVSTS64", []>;
+defm "" : LMULWriteRes<"WriteVLDUX8", []>;
+defm "" : LMULWriteRes<"WriteVLDUX16", []>;
+defm "" : LMULWriteRes<"WriteVLDUX32", []>;
+defm "" : LMULWriteRes<"WriteVLDUX64", []>;
+defm "" : LMULWriteRes<"WriteVLDOX8", []>;
+defm "" : LMULWriteRes<"WriteVLDOX16", []>;
+defm "" : LMULWriteRes<"WriteVLDOX32", []>;
+defm "" : LMULWriteRes<"WriteVLDOX64", []>;
+defm "" : LMULWriteRes<"WriteVSTUX8", []>;
+defm "" : LMULWriteRes<"WriteVSTUX16", []>;
+defm "" : LMULWriteRes<"WriteVSTUX32", []>;
+defm "" : LMULWriteRes<"WriteVSTUX64", []>;
+defm "" : LMULWriteRes<"WriteVSTOX8", []>;
+defm "" : LMULWriteRes<"WriteVSTOX16", []>;
+defm "" : LMULWriteRes<"WriteVSTOX32", []>;
+defm "" : LMULWriteRes<"WriteVSTOX64", []>;
+defm "" : LMULWriteRes<"WriteVLDFF", []>;
+// These are already LMUL aware
 def : WriteRes<WriteVLD1R, []>;
 def : WriteRes<WriteVLD2R, []>;
 def : WriteRes<WriteVLD4R, []>;
@@ -626,15 +629,15 @@ def : WriteRes<WriteVST8R, []>;
 // Vector Segment Loads and Stores
 foreach nf=2-8 in {
   foreach eew = [8, 16, 32, 64] in {
-    def : WriteRes <!cast<SchedWrite>("WriteVLSEG" # nf # "e" # eew), []>;
-    def : WriteRes <!cast<SchedWrite>("WriteVLSEGFF" # nf # "e" # eew), []>;
-    def : WriteRes <!cast<SchedWrite>("WriteVSSEG" # nf # "e" # eew), []>;
-    def : WriteRes <!cast<SchedWrite>("WriteVLSSEG" # nf # "e" # eew), []>;
-    def : WriteRes <!cast<SchedWrite>("WriteVSSSEG" # nf # "e" # eew), []>;
-    def : WriteRes <!cast<SchedWrite>("WriteVLUXSEG" # nf # "e" # eew), []>;
-    def : WriteRes <!cast<SchedWrite>("WriteVLOXSEG" # nf # "e" # eew), []>;
-    def : WriteRes <!cast<SchedWrite>("WriteVSUXSEG" # nf # "e" # eew), []>;
-    def : WriteRes <!cast<SchedWrite>("WriteVSOXSEG" # nf # "e" # eew), []>;
+    defm "" : LMULWriteRes <"WriteVLSEG" # nf # "e" # eew, []>;
+    defm "" : LMULWriteRes <"WriteVLSEGFF" # nf # "e" # eew, []>;
+    defm "" : LMULWriteRes <"WriteVSSEG" # nf # "e" # eew, []>;
+    defm "" : LMULWriteRes <"WriteVLSSEG" # nf # "e" # eew, []>;
+    defm "" : LMULWriteRes <"WriteVSSSEG" # nf # "e" # eew, []>;
+    defm "" : LMULWriteRes <"WriteVLUXSEG" # nf # "e" # eew, []>;
+    defm "" : LMULWriteRes <"WriteVLOXSEG" # nf # "e" # eew, []>;
+    defm "" : LMULWriteRes <"WriteVSUXSEG" # nf # "e" # eew, []>;
+    defm "" : LMULWriteRes <"WriteVSOXSEG" # nf # "e" # eew, []>;
   }
 }
 
@@ -763,36 +766,37 @@ def : ReadAdvance<ReadVSETVLI, 0>;
 def : ReadAdvance<ReadVSETVL, 0>;
 
 // 7. Vector Loads and Stores
-def : ReadAdvance<ReadVLDX, 0>;
-def : ReadAdvance<ReadVSTX, 0>;
-def : ReadAdvance<ReadVSTEV, 0>;
-def : ReadAdvance<ReadVSTM, 0>;
-def : ReadAdvance<ReadVLDSX, 0>;
-def : ReadAdvance<ReadVSTSX, 0>;
-def : ReadAdvance<ReadVSTS8V, 0>;
-def : ReadAdvance<ReadVSTS16V, 0>;
-def : ReadAdvance<ReadVSTS32V, 0>;
-def : ReadAdvance<ReadVSTS64V, 0>;
-def : ReadAdvance<ReadVLDUXV, 0>;
-def : ReadAdvance<ReadVLDOXV, 0>;
-def : ReadAdvance<ReadVSTUXV, 0>;
-def : ReadAdvance<ReadVSTUX8, 0>;
-def : ReadAdvance<ReadVSTUX16, 0>;
-def : ReadAdvance<ReadVSTUX32, 0>;
-def : ReadAdvance<ReadVSTUX64, 0>;
-def : ReadAdvance<ReadVSTUX8V, 0>;
-def : ReadAdvance<ReadVSTUX16V, 0>;
-def : ReadAdvance<ReadVSTUX32V, 0>;
-def : ReadAdvance<ReadVSTUX64V, 0>;
-def : ReadAdvance<ReadVSTOX8, 0>;
-def : ReadAdvance<ReadVSTOX16, 0>;
-def : ReadAdvance<ReadVSTOX32, 0>;
-def : ReadAdvance<ReadVSTOX64, 0>;
-def : ReadAdvance<ReadVSTOXV, 0>;
-def : ReadAdvance<ReadVSTOX8V, 0>;
-def : ReadAdvance<ReadVSTOX16V, 0>;
-def : ReadAdvance<ReadVSTOX32V, 0>;
-def : ReadAdvance<ReadVSTOX64V, 0>;
+defm "" : LMULReadAdvance<"ReadVLDX", 0>;
+defm "" : LMULReadAdvance<"ReadVSTX", 0>;
+defm "" : LMULReadAdvance<"ReadVSTEV", 0>;
+defm "" : LMULReadAdvance<"ReadVSTM", 0>;
+defm "" : LMULReadAdvance<"ReadVLDSX", 0>;
+defm "" : LMULReadAdvance<"ReadVSTSX", 0>;
+defm "" : LMULReadAdvance<"ReadVSTS8V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTS16V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTS32V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTS64V", 0>;
+defm "" : LMULReadAdvance<"ReadVLDUXV", 0>;
+defm "" : LMULReadAdvance<"ReadVLDOXV", 0>;
+defm "" : LMULReadAdvance<"ReadVSTUXV", 0>;
+defm "" : LMULReadAdvance<"ReadVSTUX8", 0>;
+defm "" : LMULReadAdvance<"ReadVSTUX16", 0>;
+defm "" : LMULReadAdvance<"ReadVSTUX32", 0>;
+defm "" : LMULReadAdvance<"ReadVSTUX64", 0>;
+defm "" : LMULReadAdvance<"ReadVSTUX8V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTUX16V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTUX32V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTUX64V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTOX8", 0>;
+defm "" : LMULReadAdvance<"ReadVSTOX16", 0>;
+defm "" : LMULReadAdvance<"ReadVSTOX32", 0>;
+defm "" : LMULReadAdvance<"ReadVSTOX64", 0>;
+defm "" : LMULReadAdvance<"ReadVSTOXV", 0>;
+defm "" : LMULReadAdvance<"ReadVSTOX8V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTOX16V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTOX32V", 0>;
+defm "" : LMULReadAdvance<"ReadVSTOX64V", 0>;
+// These are already LMUL aware
 def : ReadAdvance<ReadVST1R, 0>;
 def : ReadAdvance<ReadVST2R, 0>;
 def : ReadAdvance<ReadVST4R, 0>;


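With per-LMUL WriteRes and ReadAdvance in place, a subtarget scheduler model
can now give vector loads and stores LMUL-dependent behavior. A hypothetical
fragment; MyVectorModel and MyVLSU are invented names and the latencies are
illustrative only, not taken from any real subtarget:

  // Hypothetical processor model fragment. Assumes MyVectorModel is a
  // SchedMachineModel defined elsewhere; latencies are made up.
  let SchedModel = MyVectorModel in {
    def MyVLSU : ProcResource<1>;
    // Unit-stride vector load latency grows with the size of the
    // register group the load writes.
    def : WriteRes<WriteVLDE_M1, [MyVLSU]> { let Latency = 4; }
    def : WriteRes<WriteVLDE_M2, [MyVLSU]> { let Latency = 6; }
    def : WriteRes<WriteVLDE_M4, [MyVLSU]> { let Latency = 10; }
    def : WriteRes<WriteVLDE_M8, [MyVLSU]> { let Latency = 18; }
  }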
        

