[llvm] [RISCV] Factor out common SiFive7 scheduling model into an abstraction layer (PR #144442)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 16 21:14:59 PDT 2025


================
@@ -186,1121 +186,1188 @@ class SiFive7AnyToGPRBypass<SchedRead read, int cycles = 2>
                                  WriteIRem, WriteIRem32,
                                  WriteLDB, WriteLDH, WriteLDW, WriteLDD]>;
 
-// SiFive7 machine model for scheduling and other instruction cost heuristics.
-def SiFive7Model : SchedMachineModel {
-  let MicroOpBufferSize = 0; // Explicitly set to zero since SiFive7 is in-order.
-  let IssueWidth = 2;        // 2 micro-ops are dispatched per cycle.
-  let LoadLatency = 3;
-  let MispredictPenalty = 3;
-  let CompleteModel = 0;
-  let EnableIntervals = true;
-  let UnsupportedFeatures = [HasStdExtZbkb, HasStdExtZbkc, HasStdExtZbkx,
-                             HasStdExtZcmt, HasStdExtZknd, HasStdExtZkne,
-                             HasStdExtZknh, HasStdExtZksed, HasStdExtZksh,
-                             HasStdExtZkr];
-}
-
 // The SiFive7 microarchitecture has three pipelines: A, B, V.
 // Pipe A can handle memory, integer alu and vector operations.
 // Pipe B can handle integer alu, control flow, integer multiply and divide,
 // and floating point computation.
 // The V pipeline is modeled by the VCQ, VA, VL, and VS resources.
-let SchedModel = SiFive7Model in {
-let BufferSize = 0 in {
-def SiFive7PipeA       : ProcResource<1>;
-def SiFive7PipeB       : ProcResource<1>;
-def SiFive7IDiv        : ProcResource<1>; // Int Division
-def SiFive7FDiv        : ProcResource<1>; // FP Division/Sqrt
-def SiFive7VA          : ProcResource<1>; // Arithmetic sequencer
-def SiFive7VL          : ProcResource<1>; // Load sequencer
-def SiFive7VS          : ProcResource<1>; // Store sequencer
-// The VCQ accepts instructions from the the A Pipe and holds them until the
-// vector unit is ready to dequeue them. The unit dequeues up to one instruction
-// per cycle, in order, as soon as the sequencer for that type of instruction is
-// available. This resource is meant to be used for 1 cycle by all vector
-// instructions, to model that only one vector instruction may be dequeued at a
-// time. The actual dequeueing into the sequencer is modeled by the VA, VL, and
-// VS sequencer resources below. Each of them will only accept a single
-// instruction at a time and remain busy for the number of cycles associated
-// with that instruction.
-def SiFive7VCQ         : ProcResource<1>; // Vector Command Queue
-}
-
-def SiFive7PipeAB : ProcResGroup<[SiFive7PipeA, SiFive7PipeB]>;
-
-defvar SiFive7VLEN = 512;
-
-// Branching
-let Latency = 3 in {
-def : WriteRes<WriteJmp, [SiFive7PipeB]>;
-def : WriteRes<WriteJal, [SiFive7PipeB]>;
-def : WriteRes<WriteJalr, [SiFive7PipeB]>;
-}
-
-//Short forward branch
-def : WriteRes<WriteSFB, [SiFive7PipeA, SiFive7PipeB]> {
-  let Latency = 3;
-  let NumMicroOps = 2;
-}
-
-// Integer arithmetic and logic
-let Latency = 3 in {
-def : WriteRes<WriteIALU, [SiFive7PipeAB]>;
-def : WriteRes<WriteIALU32, [SiFive7PipeAB]>;
-def : WriteRes<WriteShiftImm, [SiFive7PipeAB]>;
-def : WriteRes<WriteShiftImm32, [SiFive7PipeAB]>;
-def : WriteRes<WriteShiftReg, [SiFive7PipeAB]>;
-def : WriteRes<WriteShiftReg32, [SiFive7PipeAB]>;
-}
+multiclass SiFive7ProcResources {
+  let BufferSize = 0 in {
+    def PipeA     : ProcResource<1>;
+    def PipeB     : ProcResource<1>;
+
+    def IDiv      : ProcResource<1>; // Int Division
+    def FDiv      : ProcResource<1>; // FP Division/Sqrt
+
+    def VA        : ProcResource<1>; // Arithmetic sequencer
+    def VL        : ProcResource<1>; // Load sequencer
+    def VS        : ProcResource<1>; // Store sequencer
+    // The VCQ accepts instructions from the A Pipe and holds them until the
+    // vector unit is ready to dequeue them. The unit dequeues up to one
+    // instruction per cycle, in order, as soon as the sequencer for that type
+    // of instruction is available. This resource is meant to be used for 1
+    // cycle by all vector instructions, to model that only one vector
+    // instruction may be dequeued at a time. The actual dequeueing into the
+    // sequencer is modeled by the VA, VL, and VS sequencer resources below.
+    // Each of them will only accept a single instruction at a time and
+    // remain busy for the number of cycles associated with that instruction.
+    def VCQ       : ProcResource<1>; // Vector Command Queue
+  }
 
-// Integer multiplication
-let Latency = 3 in {
-def : WriteRes<WriteIMul, [SiFive7PipeB]>;
-def : WriteRes<WriteIMul32, [SiFive7PipeB]>;
+  def PipeAB : ProcResGroup<[!cast<ProcResource>(NAME#"PipeA"),
+                             !cast<ProcResource>(NAME#"PipeB")]>;
 }
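
For reference, the multiclass above only creates the resources under a NAME
prefix; the instantiation is not part of this hunk. A rough sketch of how a
concrete model would pull the definitions in (the defm spelling and the
wrapping let are assumptions, modeled on the SiFive7 definitions deleted
above):

    // Hypothetical instantiation; SiFive7Model is assumed to still be
    // defined elsewhere after this refactoring.
    let SchedModel = SiFive7Model in
      defm SiFive7 : SiFive7ProcResources;

    // TableGen prepends the defm name to each def in the multiclass, so this
    // yields SiFive7PipeA, SiFive7PipeB, ..., SiFive7VCQ and SiFive7PipeAB,
    // matching the names removed above; inside the multiclass,
    // !cast<ProcResource>(NAME # "PipeA") resolves to those same records.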
 
-// Integer division
-def : WriteRes<WriteIDiv, [SiFive7PipeB, SiFive7IDiv]> {
-  let Latency = 66;
-  let ReleaseAtCycles = [1, 65];
-}
-def : WriteRes<WriteIDiv32,  [SiFive7PipeB, SiFive7IDiv]> {
-  let Latency = 34;
-  let ReleaseAtCycles = [1, 33];
-}
+multiclass SiFive7WriteResBase<int VLEN,
+    ProcResourceKind PipeA, ProcResourceKind PipeB, ProcResourceKind PipeAB,
+    ProcResourceKind IDiv, ProcResourceKind FDiv,
+    ProcResourceKind VA, ProcResourceKind VL, ProcResourceKind VS,
+    ProcResourceKind VCQ> {
 
-// Integer remainder
-def : WriteRes<WriteIRem, [SiFive7PipeB, SiFive7IDiv]> {
-  let Latency = 66;
-  let ReleaseAtCycles = [1, 65];
-}
-def : WriteRes<WriteIRem32,  [SiFive7PipeB, SiFive7IDiv]> {
-  let Latency = 34;
-  let ReleaseAtCycles = [1, 33];
-}
+  // Branching
+  let Latency = 3 in {
+    def : WriteRes<WriteJmp, [PipeB]>;
+    def : WriteRes<WriteJal, [PipeB]>;
+    def : WriteRes<WriteJalr, [PipeB]>;
+  }
 
-// Bitmanip
-let Latency = 3 in {
-// Rotates are in the late-B ALU.
-def : WriteRes<WriteRotateImm, [SiFive7PipeB]>;
-def : WriteRes<WriteRotateImm32, [SiFive7PipeB]>;
-def : WriteRes<WriteRotateReg, [SiFive7PipeB]>;
-def : WriteRes<WriteRotateReg32, [SiFive7PipeB]>;
+  // Short forward branch
+  def : WriteRes<WriteSFB, [PipeA, PipeB]> {
+    let Latency = 3;
+    let NumMicroOps = 2;
+  }
 
-// clz[w]/ctz[w] are in the late-B ALU.
-def : WriteRes<WriteCLZ, [SiFive7PipeB]>;
-def : WriteRes<WriteCLZ32, [SiFive7PipeB]>;
-def : WriteRes<WriteCTZ, [SiFive7PipeB]>;
-def : WriteRes<WriteCTZ32, [SiFive7PipeB]>;
+  // Integer arithmetic and logic
+  let Latency = 3 in {
+    def : WriteRes<WriteIALU, [PipeAB]>;
+    def : WriteRes<WriteIALU32, [PipeAB]>;
+    def : WriteRes<WriteShiftImm, [PipeAB]>;
+    def : WriteRes<WriteShiftImm32, [PipeAB]>;
+    def : WriteRes<WriteShiftReg, [PipeAB]>;
+    def : WriteRes<WriteShiftReg32, [PipeAB]>;
+  }
 
-// cpop[w] look exactly like multiply.
-def : WriteRes<WriteCPOP, [SiFive7PipeB]>;
-def : WriteRes<WriteCPOP32, [SiFive7PipeB]>;
+  // Integer multiplication
+  let Latency = 3 in {
+    def : WriteRes<WriteIMul, [PipeB]>;
+    def : WriteRes<WriteIMul32, [PipeB]>;
+  }
 
-// orc.b is in the late-B ALU.
-def : WriteRes<WriteORCB, [SiFive7PipeB]>;
+  // Integer division
+  def : WriteRes<WriteIDiv, [PipeB, IDiv]> {
+    let Latency = 66;
+    let ReleaseAtCycles = [1, 65];
+  }
+  def : WriteRes<WriteIDiv32,  [PipeB, IDiv]> {
+    let Latency = 34;
+    let ReleaseAtCycles = [1, 33];
+  }
 
-// min/max are in the late-B ALU
-def : WriteRes<WriteIMinMax, [SiFive7PipeB]>;
+  // Integer remainder
+  def : WriteRes<WriteIRem, [PipeB, IDiv]> {
+    let Latency = 66;
+    let ReleaseAtCycles = [1, 65];
+  }
+  def : WriteRes<WriteIRem32,  [PipeB, IDiv]> {
+    let Latency = 34;
+    let ReleaseAtCycles = [1, 33];
+  }
 
-// rev8 is in the late-A and late-B ALUs.
-def : WriteRes<WriteREV8, [SiFive7PipeAB]>;
+  // Bitmanip
+  let Latency = 3 in {
+    // Rotates are in the late-B ALU.
+    def : WriteRes<WriteRotateImm, [PipeB]>;
+    def : WriteRes<WriteRotateImm32, [PipeB]>;
+    def : WriteRes<WriteRotateReg, [PipeB]>;
+    def : WriteRes<WriteRotateReg32, [PipeB]>;
 
-// shNadd[.uw] is on the early-B and late-B ALUs.
-def : WriteRes<WriteSHXADD, [SiFive7PipeB]>;
-def : WriteRes<WriteSHXADD32, [SiFive7PipeB]>;
-}
+    // clz[w]/ctz[w] are in the late-B ALU.
+    def : WriteRes<WriteCLZ, [PipeB]>;
+    def : WriteRes<WriteCLZ32, [PipeB]>;
+    def : WriteRes<WriteCTZ, [PipeB]>;
+    def : WriteRes<WriteCTZ32, [PipeB]>;
 
-// Single-bit instructions
-// BEXT[I] instruction is available on all ALUs and the other instructions
-// are only available on the SiFive7B pipe.
-let Latency = 3 in {
-def : WriteRes<WriteSingleBit, [SiFive7PipeB]>;
-def : WriteRes<WriteSingleBitImm, [SiFive7PipeB]>;
-def : WriteRes<WriteBEXT, [SiFive7PipeAB]>;
-def : WriteRes<WriteBEXTI, [SiFive7PipeAB]>;
-}
+    // cpop[w] look exactly like multiply.
+    def : WriteRes<WriteCPOP, [PipeB]>;
+    def : WriteRes<WriteCPOP32, [PipeB]>;
 
-// Memory
-def : WriteRes<WriteSTB, [SiFive7PipeA]>;
-def : WriteRes<WriteSTH, [SiFive7PipeA]>;
-def : WriteRes<WriteSTW, [SiFive7PipeA]>;
-def : WriteRes<WriteSTD, [SiFive7PipeA]>;
-def : WriteRes<WriteFST16, [SiFive7PipeA]>;
-def : WriteRes<WriteFST32, [SiFive7PipeA]>;
-def : WriteRes<WriteFST64, [SiFive7PipeA]>;
-
-let Latency = 3 in {
-def : WriteRes<WriteLDB, [SiFive7PipeA]>;
-def : WriteRes<WriteLDH, [SiFive7PipeA]>;
-def : WriteRes<WriteLDW, [SiFive7PipeA]>;
-def : WriteRes<WriteLDD, [SiFive7PipeA]>;
-}
+    // orc.b is in the late-B ALU.
+    def : WriteRes<WriteORCB, [PipeB]>;
 
-let Latency = 2 in {
-def : WriteRes<WriteFLD16, [SiFive7PipeA]>;
-def : WriteRes<WriteFLD32, [SiFive7PipeA]>;
-def : WriteRes<WriteFLD64, [SiFive7PipeA]>;
-}
+    // min/max are in the late-B ALU
+    def : WriteRes<WriteIMinMax, [PipeB]>;
 
-// Atomic memory
-def : WriteRes<WriteAtomicSTW, [SiFive7PipeA]>;
-def : WriteRes<WriteAtomicSTD, [SiFive7PipeA]>;
+    // rev8 is in the late-A and late-B ALUs.
+    def : WriteRes<WriteREV8, [PipeAB]>;
 
-let Latency = 3 in {
-def : WriteRes<WriteAtomicW, [SiFive7PipeA]>;
-def : WriteRes<WriteAtomicD, [SiFive7PipeA]>;
-def : WriteRes<WriteAtomicLDW, [SiFive7PipeA]>;
-def : WriteRes<WriteAtomicLDD, [SiFive7PipeA]>;
-}
+    // shNadd[.uw] is on the early-B and late-B ALUs.
+    def : WriteRes<WriteSHXADD, [PipeB]>;
+    def : WriteRes<WriteSHXADD32, [PipeB]>;
+  }
 
-// Half precision.
-let Latency = 5 in {
-def : WriteRes<WriteFAdd16, [SiFive7PipeB]>;
-def : WriteRes<WriteFMul16, [SiFive7PipeB]>;
-def : WriteRes<WriteFMA16, [SiFive7PipeB]>;
-}
-let Latency = 3 in {
-def : WriteRes<WriteFSGNJ16, [SiFive7PipeB]>;
-def : WriteRes<WriteFMinMax16, [SiFive7PipeB]>;
-}
+  // Single-bit instructions
+  // BEXT[I] instruction is available on all ALUs and the other instructions
+  // are only available on the B pipe.
+  let Latency = 3 in {
+    def : WriteRes<WriteSingleBit, [PipeB]>;
+    def : WriteRes<WriteSingleBitImm, [PipeB]>;
+    def : WriteRes<WriteBEXT, [PipeAB]>;
+    def : WriteRes<WriteBEXTI, [PipeAB]>;
+  }
 
-let Latency = 14, ReleaseAtCycles = [1, 13] in {
-def :  WriteRes<WriteFDiv16, [SiFive7PipeB, SiFive7FDiv]>;
-def :  WriteRes<WriteFSqrt16, [SiFive7PipeB, SiFive7FDiv]>;
-}
+  // Memory
+  def : WriteRes<WriteSTB, [PipeA]>;
+  def : WriteRes<WriteSTH, [PipeA]>;
+  def : WriteRes<WriteSTW, [PipeA]>;
+  def : WriteRes<WriteSTD, [PipeA]>;
+  def : WriteRes<WriteFST16, [PipeA]>;
+  def : WriteRes<WriteFST32, [PipeA]>;
+  def : WriteRes<WriteFST64, [PipeA]>;
+
+  let Latency = 3 in {
+    def : WriteRes<WriteLDB, [PipeA]>;
+    def : WriteRes<WriteLDH, [PipeA]>;
+    def : WriteRes<WriteLDW, [PipeA]>;
+    def : WriteRes<WriteLDD, [PipeA]>;
+  }
 
-// Single precision.
-let Latency = 5 in {
-def : WriteRes<WriteFAdd32, [SiFive7PipeB]>;
-def : WriteRes<WriteFMul32, [SiFive7PipeB]>;
-def : WriteRes<WriteFMA32, [SiFive7PipeB]>;
-}
-let Latency = 3 in {
-def : WriteRes<WriteFSGNJ32, [SiFive7PipeB]>;
-def : WriteRes<WriteFMinMax32, [SiFive7PipeB]>;
-}
+  let Latency = 2 in {
+    def : WriteRes<WriteFLD16, [PipeA]>;
+    def : WriteRes<WriteFLD32, [PipeA]>;
+    def : WriteRes<WriteFLD64, [PipeA]>;
+  }
 
-def : WriteRes<WriteFDiv32, [SiFive7PipeB, SiFive7FDiv]> { let Latency = 27;
-                                                         let ReleaseAtCycles = [1, 26]; }
-def : WriteRes<WriteFSqrt32, [SiFive7PipeB, SiFive7FDiv]> { let Latency = 27;
-                                                          let ReleaseAtCycles = [1, 26]; }
+  // Atomic memory
+  def : WriteRes<WriteAtomicSTW, [PipeA]>;
+  def : WriteRes<WriteAtomicSTD, [PipeA]>;
 
-// Double precision
-let Latency = 7 in {
-def : WriteRes<WriteFAdd64, [SiFive7PipeB]>;
-def : WriteRes<WriteFMul64, [SiFive7PipeB]>;
-def : WriteRes<WriteFMA64, [SiFive7PipeB]>;
-}
-let Latency = 3 in {
-def : WriteRes<WriteFSGNJ64, [SiFive7PipeB]>;
-def : WriteRes<WriteFMinMax64, [SiFive7PipeB]>;
-}
+  let Latency = 3 in {
+    def : WriteRes<WriteAtomicW, [PipeA]>;
+    def : WriteRes<WriteAtomicD, [PipeA]>;
+    def : WriteRes<WriteAtomicLDW, [PipeA]>;
+    def : WriteRes<WriteAtomicLDD, [PipeA]>;
+  }
 
-def : WriteRes<WriteFDiv64, [SiFive7PipeB, SiFive7FDiv]> { let Latency = 56;
-                                                         let ReleaseAtCycles = [1, 55]; }
-def : WriteRes<WriteFSqrt64, [SiFive7PipeB, SiFive7FDiv]> { let Latency = 56;
-                                                          let ReleaseAtCycles = [1, 55]; }
-
-// Conversions
-let Latency = 3 in {
-def : WriteRes<WriteFCvtI32ToF16, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtI32ToF32, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtI32ToF64, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtI64ToF16, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtI64ToF32, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtI64ToF64, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF16ToI32, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF16ToI64, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF16ToF32, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF16ToF64, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF32ToI32, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF32ToI64, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF32ToF16, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF32ToF64, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF64ToI32, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF64ToI64, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF64ToF16, [SiFive7PipeB]>;
-def : WriteRes<WriteFCvtF64ToF32, [SiFive7PipeB]>;
-
-def : WriteRes<WriteFClass16, [SiFive7PipeB]>;
-def : WriteRes<WriteFClass32, [SiFive7PipeB]>;
-def : WriteRes<WriteFClass64, [SiFive7PipeB]>;
-def : WriteRes<WriteFCmp16, [SiFive7PipeB]>;
-def : WriteRes<WriteFCmp32, [SiFive7PipeB]>;
-def : WriteRes<WriteFCmp64, [SiFive7PipeB]>;
-def : WriteRes<WriteFMovI16ToF16, [SiFive7PipeB]>;
-def : WriteRes<WriteFMovF16ToI16, [SiFive7PipeB]>;
-def : WriteRes<WriteFMovI32ToF32, [SiFive7PipeB]>;
-def : WriteRes<WriteFMovF32ToI32, [SiFive7PipeB]>;
-def : WriteRes<WriteFMovI64ToF64, [SiFive7PipeB]>;
-def : WriteRes<WriteFMovF64ToI64, [SiFive7PipeB]>;
-}
+  // Half precision.
+  let Latency = 5 in {
+    def : WriteRes<WriteFAdd16, [PipeB]>;
+    def : WriteRes<WriteFMul16, [PipeB]>;
+    def : WriteRes<WriteFMA16, [PipeB]>;
+  }
+  let Latency = 3 in {
+    def : WriteRes<WriteFSGNJ16, [PipeB]>;
+    def : WriteRes<WriteFMinMax16, [PipeB]>;
+  }
 
-// 6. Configuration-Setting Instructions
-let Latency = 3 in {
-def : WriteRes<WriteVSETVLI, [SiFive7PipeA]>;
-def : WriteRes<WriteVSETIVLI, [SiFive7PipeA]>;
-def : WriteRes<WriteVSETVL, [SiFive7PipeA]>;
-}
+  let Latency = 14, ReleaseAtCycles = [1, 13] in {
+    def : WriteRes<WriteFDiv16, [PipeB, FDiv]>;
+    def : WriteRes<WriteFSqrt16, [PipeB, FDiv]>;
+  }
 
-// 7. Vector Loads and Stores
-// Unit-stride loads and stores can operate at the full bandwidth of the memory
-// pipe. The memory pipe is DLEN bits wide on x280.
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVLDE",    [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVLDFF",   [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
+  // Single precision.
+  let Latency = 5 in {
+    def : WriteRes<WriteFAdd32, [PipeB]>;
+    def : WriteRes<WriteFMul32, [PipeB]>;
+    def : WriteRes<WriteFMA32, [PipeB]>;
+  }
+  let Latency = 3 in {
+    def : WriteRes<WriteFSGNJ32, [PipeB]>;
+    def : WriteRes<WriteFMinMax32, [PipeB]>;
   }
-  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
-  defm "" : LMULWriteResMX<"WriteVSTE",    [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-}
 
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetMaskLoadStoreCycles<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
-  defm "" : LMULWriteResMX<"WriteVLDM",    [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
-  defm "" : LMULWriteResMX<"WriteVSTM",    [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-}
+  def : WriteRes<WriteFDiv32, [PipeB, FDiv]> {
+    let Latency = 27;
+    let ReleaseAtCycles = [1, 26];
+  }
+  def : WriteRes<WriteFSqrt32, [PipeB, FDiv]> {
+    let Latency = 27;
+    let ReleaseAtCycles = [1, 26];
+  }
 
-// Strided loads and stores operate at one element per cycle and should be
-// scheduled accordingly. Indexed loads and stores operate at one element per
-// cycle, and they stall the machine until all addresses have been generated,
-// so they cannot be scheduled. Indexed and strided loads and stores have LMUL
-// specific suffixes, but since SEW is already encoded in the name of the
-// resource, we do not need to use LMULSEWXXX constructors. However, we do
-// use the SEW from the name to determine the number of Cycles.
-
-foreach mx = SchedMxList in {
-  defvar VLDSX0Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar Cycles = SiFive7GetCyclesOnePerElement<mx, 8, SiFive7VLEN>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  defm SiFive7 : LMULWriteResMXVariant<"WriteVLDS8",  VLDSX0Pred, [SiFive7VCQ, SiFive7VL],
-                                       4, [0, 1], [1, !add(1, VLDSX0Cycles)], !add(3, Cycles),
-                                       [0, 1], [1, !add(1, Cycles)], mx, IsWorstCase>;
-  let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVLDUX8", [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVLDOX8", [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
+  // Double precision
+  let Latency = 7 in {
+    def : WriteRes<WriteFAdd64, [PipeB]>;
+    def : WriteRes<WriteFMul64, [PipeB]>;
+    def : WriteRes<WriteFMA64, [PipeB]>;
   }
-  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVSTS8",  [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSTUX8", [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSTOX8", [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
+  let Latency = 3 in {
+    def : WriteRes<WriteFSGNJ64, [PipeB]>;
+    def : WriteRes<WriteFMinMax64, [PipeB]>;
   }
-}
-// TODO: The MxLists need to be filtered by EEW. We only need to support
-// LMUL >= SEW_min/ELEN. Here, the smallest EEW prevents us from having MF8
-// since LMUL >= 16/64.
-foreach mx = ["MF4", "MF2", "M1", "M2", "M4", "M8"] in {
-  defvar VLDSX0Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar Cycles = SiFive7GetCyclesOnePerElement<mx, 16, SiFive7VLEN>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  defm SiFive7 : LMULWriteResMXVariant<"WriteVLDS16",  VLDSX0Pred, [SiFive7VCQ, SiFive7VL],
-                                       4, [0, 1], [1, !add(1, VLDSX0Cycles)], !add(3, Cycles),
-                                       [0, 1], [1, !add(1, Cycles)], mx, IsWorstCase>;
-  let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVLDUX16", [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVLDOX16", [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
+
+  def : WriteRes<WriteFDiv64, [PipeB, FDiv]> {
+    let Latency = 56;
+    let ReleaseAtCycles = [1, 55];
   }
-  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVSTS16",  [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSTUX16", [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSTOX16", [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
+  def : WriteRes<WriteFSqrt64, [PipeB, FDiv]> {
+    let Latency = 56;
+    let ReleaseAtCycles = [1, 55];
   }
-}
-foreach mx = ["MF2", "M1", "M2", "M4", "M8"] in {
-  defvar VLDSX0Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar Cycles = SiFive7GetCyclesOnePerElement<mx, 32, SiFive7VLEN>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  defm SiFive7 : LMULWriteResMXVariant<"WriteVLDS32",  VLDSX0Pred, [SiFive7VCQ, SiFive7VL],
-                                       4, [0, 1], [1, !add(1, VLDSX0Cycles)], !add(3, Cycles),
-                                       [0, 1], [1, !add(1, Cycles)], mx, IsWorstCase>;
-  let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVLDUX32", [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVLDOX32", [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
+
+  // Conversions
+  let Latency = 3 in {
+    def : WriteRes<WriteFCvtI32ToF16, [PipeB]>;
+    def : WriteRes<WriteFCvtI32ToF32, [PipeB]>;
+    def : WriteRes<WriteFCvtI32ToF64, [PipeB]>;
+    def : WriteRes<WriteFCvtI64ToF16, [PipeB]>;
+    def : WriteRes<WriteFCvtI64ToF32, [PipeB]>;
+    def : WriteRes<WriteFCvtI64ToF64, [PipeB]>;
+    def : WriteRes<WriteFCvtF16ToI32, [PipeB]>;
+    def : WriteRes<WriteFCvtF16ToI64, [PipeB]>;
+    def : WriteRes<WriteFCvtF16ToF32, [PipeB]>;
+    def : WriteRes<WriteFCvtF16ToF64, [PipeB]>;
+    def : WriteRes<WriteFCvtF32ToI32, [PipeB]>;
+    def : WriteRes<WriteFCvtF32ToI64, [PipeB]>;
+    def : WriteRes<WriteFCvtF32ToF16, [PipeB]>;
+    def : WriteRes<WriteFCvtF32ToF64, [PipeB]>;
+    def : WriteRes<WriteFCvtF64ToI32, [PipeB]>;
+    def : WriteRes<WriteFCvtF64ToI64, [PipeB]>;
+    def : WriteRes<WriteFCvtF64ToF16, [PipeB]>;
+    def : WriteRes<WriteFCvtF64ToF32, [PipeB]>;
+
+    def : WriteRes<WriteFClass16, [PipeB]>;
+    def : WriteRes<WriteFClass32, [PipeB]>;
+    def : WriteRes<WriteFClass64, [PipeB]>;
+    def : WriteRes<WriteFCmp16, [PipeB]>;
+    def : WriteRes<WriteFCmp32, [PipeB]>;
+    def : WriteRes<WriteFCmp64, [PipeB]>;
+    def : WriteRes<WriteFMovI16ToF16, [PipeB]>;
+    def : WriteRes<WriteFMovF16ToI16, [PipeB]>;
+    def : WriteRes<WriteFMovI32ToF32, [PipeB]>;
+    def : WriteRes<WriteFMovF32ToI32, [PipeB]>;
+    def : WriteRes<WriteFMovI64ToF64, [PipeB]>;
+    def : WriteRes<WriteFMovF64ToI64, [PipeB]>;
   }
-  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVSTS32",  [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSTUX32", [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSTOX32", [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
+
+  // 6. Configuration-Setting Instructions
+  let Latency = 3 in {
+    def : WriteRes<WriteVSETVLI, [PipeA]>;
+    def : WriteRes<WriteVSETIVLI, [PipeA]>;
+    def : WriteRes<WriteVSETVL, [PipeA]>;
   }
-}
-foreach mx = ["M1", "M2", "M4", "M8"] in {
-  defvar VLDSX0Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar Cycles = SiFive7GetCyclesOnePerElement<mx, 64, SiFive7VLEN>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  defm SiFive7 : LMULWriteResMXVariant<"WriteVLDS64",  VLDSX0Pred, [SiFive7VCQ, SiFive7VL],
-                                       4, [0, 1], [1, !add(1, VLDSX0Cycles)], !add(3, Cycles),
-                                       [0, 1], [1, !add(1, Cycles)], mx, IsWorstCase>;
-  let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVLDUX64", [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVLDOX64", [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
+
+  // 7. Vector Loads and Stores
+  // Unit-stride loads and stores can operate at the full bandwidth of the memory
+  // pipe. The memory pipe is DLEN bits wide on x280.
+  foreach mx = SchedMxList in {
+    defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVLDE",    [VCQ, VL], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVLDFF",   [VCQ, VL], mx, IsWorstCase>;
+    }
+    let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
+    defm : LMULWriteResMX<"WriteVSTE",    [VCQ, VS], mx, IsWorstCase>;
   }
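
The operand pattern repeated through these vector entries reads as follows (a
hedged decoding, based on the VCQ comment earlier in this patch): with the
resource list [VCQ, VL] or [VCQ, VS], AcquireAtCycles = [0, 1] and
ReleaseAtCycles = [1, !add(1, Cycles)] hold the command queue for one cycle
starting at cycle 0 and the sequencer for Cycles cycles starting at cycle 1.
For one of the per-LMUL writes this expands roughly to the sketch below (the
WriteVLDE_M1 name and the value of SiFive7GetCyclesDefault<"M1">.c are
assumptions, not taken from this patch):

    // Illustrative only: assumes SiFive7GetCyclesDefault<"M1">.c == 2 and
    // that LMULWriteResMX forwards its operands to a plain WriteRes.
    def : WriteRes<WriteVLDE_M1, [SiFive7VCQ, SiFive7VL]> {
      let Latency = 4;
      let AcquireAtCycles = [0, 1]; // VCQ busy from cycle 0, VL from cycle 1
      let ReleaseAtCycles = [1, 3]; // VCQ freed after 1 cycle, VL after 1 + 2
    }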
-  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVSTS64",  [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSTUX64", [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSTOX64", [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
+
+  foreach mx = SchedMxList in {
+    defvar Cycles = SiFive7GetMaskLoadStoreCycles<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
+    defm : LMULWriteResMX<"WriteVLDM",    [VCQ, VL], mx, IsWorstCase>;
+    let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
+    defm : LMULWriteResMX<"WriteVSTM",    [VCQ, VS], mx, IsWorstCase>;
   }
-}
 
-// VLD*R is LMUL aware
-let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 2)] in
-  def : WriteRes<WriteVLD1R,  [SiFive7VCQ, SiFive7VL]>;
-let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 4)] in
-  def : WriteRes<WriteVLD2R,  [SiFive7VCQ, SiFive7VL]>;
-let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 8)] in
-  def : WriteRes<WriteVLD4R,  [SiFive7VCQ, SiFive7VL]>;
-let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 16)] in
-  def : WriteRes<WriteVLD8R,  [SiFive7VCQ, SiFive7VL]>;
-// VST*R is LMUL aware
-let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 2)] in
-  def : WriteRes<WriteVST1R,   [SiFive7VCQ, SiFive7VS]>;
-let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 4)] in
-  def : WriteRes<WriteVST2R,   [SiFive7VCQ, SiFive7VS]>;
-let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 8)] in
-  def : WriteRes<WriteVST4R,   [SiFive7VCQ, SiFive7VS]>;
-let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 16)] in
-  def : WriteRes<WriteVST8R,   [SiFive7VCQ, SiFive7VS]>;
-
-// Segmented Loads and Stores
-// Unit-stride segmented loads and stores are effectively converted into strided
-// segment loads and stores. Strided segment loads and stores operate at up to
-// one segment per cycle if the segment fits within one aligned memory beat.
-// Indexed segment loads and stores operate at the same rate as strided ones,
-// but they stall the machine until all addresses have been generated.
-foreach mx = SchedMxList in {
-  foreach eew = [8, 16, 32, 64] in {
-    defvar Cycles = SiFive7GetCyclesSegmentedSeg2<mx>.c;
+  // Strided loads and stores operate at one element per cycle and should be
+  // scheduled accordingly. Indexed loads and stores operate at one element per
+  // cycle, and they stall the machine until all addresses have been generated,
+  // so they cannot be scheduled. Indexed and strided loads and stores have LMUL
+  // specific suffixes, but since SEW is already encoded in the name of the
+  // resource, we do not need to use LMULSEWXXX constructors. However, we do
+  // use the SEW from the name to determine the number of Cycles.
+
+  foreach mx = SchedMxList in {
+    defvar VLDSX0Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar Cycles = SiFive7GetCyclesOnePerElement<mx, 8, VLEN>.c;
     defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-    // Does not chain so set latency high
+    defm : LMULWriteResMXVariant<"WriteVLDS8",  VLDSX0Pred, [VCQ, VL],
+                                 4, [0, 1], [1, !add(1, VLDSX0Cycles)], !add(3, Cycles),
+                                 [0, 1], [1, !add(1, Cycles)], mx, IsWorstCase>;
     let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-      defm "" : LMULWriteResMX<"WriteVLSEG2e" # eew,   [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-      defm "" : LMULWriteResMX<"WriteVLSEGFF2e" # eew, [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVLDUX8", [VCQ, VL], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVLDOX8", [VCQ, VL], mx, IsWorstCase>;
     }
-    let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
-    defm "" : LMULWriteResMX<"WriteVSSEG2e" # eew,   [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-    foreach nf=3-8 in {
-      defvar Cycles = SiFive7GetCyclesSegmented<mx, eew, nf, SiFive7VLEN>.c;
-      defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-      // Does not chain so set latency high
-      let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-        defm "" : LMULWriteResMX<"WriteVLSEG" # nf # "e" # eew,   [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-        defm "" : LMULWriteResMX<"WriteVLSEGFF" # nf # "e" # eew, [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-      }
-      let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
-      defm "" : LMULWriteResMX<"WriteVSSEG" # nf # "e" # eew,   [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
+    let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVSTS8",  [VCQ, VS], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSTUX8", [VCQ, VS], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSTOX8", [VCQ, VS], mx, IsWorstCase>;
     }
   }
-}
-foreach mx = SchedMxList in {
-  foreach nf=2-8 in {
+  // TODO: The MxLists need to be filtered by EEW. We only need to support
+  // LMUL >= SEW_min/ELEN. Here, the smallest EEW prevents us from having MF8
+  // since LMUL >= 16/64.
+  foreach mx = ["MF4", "MF2", "M1", "M2", "M4", "M8"] in {
+    defvar VLDSX0Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar Cycles = SiFive7GetCyclesOnePerElement<mx, 16, VLEN>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    defm : LMULWriteResMXVariant<"WriteVLDS16",  VLDSX0Pred, [VCQ, VL],
+                                 4, [0, 1], [1, !add(1, VLDSX0Cycles)], !add(3, Cycles),
+                                 [0, 1], [1, !add(1, Cycles)], mx, IsWorstCase>;
+    let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVLDUX16", [VCQ, VL], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVLDOX16", [VCQ, VL], mx, IsWorstCase>;
+    }
+    let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVSTS16",  [VCQ, VS], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSTUX16", [VCQ, VS], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSTOX16", [VCQ, VS], mx, IsWorstCase>;
+    }
+  }
+  foreach mx = ["MF2", "M1", "M2", "M4", "M8"] in {
+    defvar VLDSX0Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar Cycles = SiFive7GetCyclesOnePerElement<mx, 32, VLEN>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    defm : LMULWriteResMXVariant<"WriteVLDS32",  VLDSX0Pred, [VCQ, VL],
+                                 4, [0, 1], [1, !add(1, VLDSX0Cycles)], !add(3, Cycles),
+                                 [0, 1], [1, !add(1, Cycles)], mx, IsWorstCase>;
+    let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVLDUX32", [VCQ, VL], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVLDOX32", [VCQ, VL], mx, IsWorstCase>;
+    }
+    let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVSTS32",  [VCQ, VS], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSTUX32", [VCQ, VS], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSTOX32", [VCQ, VS], mx, IsWorstCase>;
+    }
+  }
+  foreach mx = ["M1", "M2", "M4", "M8"] in {
+    defvar VLDSX0Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar Cycles = SiFive7GetCyclesOnePerElement<mx, 64, VLEN>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    defm : LMULWriteResMXVariant<"WriteVLDS64",  VLDSX0Pred, [VCQ, VL],
+                                 4, [0, 1], [1, !add(1, VLDSX0Cycles)], !add(3, Cycles),
+                                 [0, 1], [1, !add(1, Cycles)], mx, IsWorstCase>;
+    let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVLDUX64", [VCQ, VL], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVLDOX64", [VCQ, VL], mx, IsWorstCase>;
+    }
+    let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVSTS64",  [VCQ, VS], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSTUX64", [VCQ, VS], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSTOX64", [VCQ, VS], mx, IsWorstCase>;
+    }
+  }
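
As a worked example of the one-element-per-cycle costing above (assuming
SiFive7GetCyclesOnePerElement<mx, sew, VLEN> evaluates to VLEN * LMUL / SEW,
which matches the comment but is an assumption about the helper), with the
VLEN = 512 of the old SiFive7VLEN defvar, an indexed load at mx = M2 and
eew = 64 gives:

    Cycles  = 512 * 2 / 64                        = 16
    Latency = 3 + Cycles                          = 19
    ReleaseAtCycles on [VCQ, VL] = [1, 1 + Cycles] = [1, 17]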
+
+  // VLD*R is LMUL aware
+  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 2)] in
+    def : WriteRes<WriteVLD1R,  [VCQ, VL]>;
+  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 4)] in
+    def : WriteRes<WriteVLD2R,  [VCQ, VL]>;
+  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 8)] in
+    def : WriteRes<WriteVLD4R,  [VCQ, VL]>;
+  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 16)] in
+    def : WriteRes<WriteVLD8R,  [VCQ, VL]>;
+  // VST*R is LMUL aware
+  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 2)] in
+    def : WriteRes<WriteVST1R,   [VCQ, VS]>;
+  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 4)] in
+    def : WriteRes<WriteVST2R,   [VCQ, VS]>;
+  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 8)] in
+    def : WriteRes<WriteVST4R,   [VCQ, VS]>;
+  let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 16)] in
+    def : WriteRes<WriteVST8R,   [VCQ, VS]>;
+
+  // Segmented Loads and Stores
+  // Unit-stride segmented loads and stores are effectively converted into strided
+  // segment loads and stores. Strided segment loads and stores operate at up to
+  // one segment per cycle if the segment fits within one aligned memory beat.
+  // Indexed segment loads and stores operate at the same rate as strided ones,
+  // but they stall the machine until all addresses have been generated.
+  foreach mx = SchedMxList in {
     foreach eew = [8, 16, 32, 64] in {
-      defvar Cycles = SiFive7GetCyclesSegmented<mx, eew, nf, SiFive7VLEN>.c;
+      defvar Cycles = SiFive7GetCyclesSegmentedSeg2<mx>.c;
       defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
       // Does not chain so set latency high
       let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-        defm "" : LMULWriteResMX<"WriteVLSSEG" # nf # "e" # eew,  [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-        defm "" : LMULWriteResMX<"WriteVLUXSEG" # nf # "e" # eew, [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
-        defm "" : LMULWriteResMX<"WriteVLOXSEG" # nf # "e" # eew, [SiFive7VCQ, SiFive7VL], mx, IsWorstCase>;
+        defm : LMULWriteResMX<"WriteVLSEG2e" # eew,   [VCQ, VL], mx, IsWorstCase>;
+        defm : LMULWriteResMX<"WriteVLSEGFF2e" # eew, [VCQ, VL], mx, IsWorstCase>;
       }
-      let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-        defm "" : LMULWriteResMX<"WriteVSSSEG" # nf # "e" # eew,  [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-        defm "" : LMULWriteResMX<"WriteVSUXSEG" # nf # "e" # eew, [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
-        defm "" : LMULWriteResMX<"WriteVSOXSEG" # nf # "e" # eew, [SiFive7VCQ, SiFive7VS], mx, IsWorstCase>;
+      let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
+      defm : LMULWriteResMX<"WriteVSSEG2e" # eew,   [VCQ, VS], mx, IsWorstCase>;
+      foreach nf=3-8 in {
+        defvar Cycles = SiFive7GetCyclesSegmented<mx, eew, nf, VLEN>.c;
+        defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+        // Does not chain so set latency high
+        let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+          defm : LMULWriteResMX<"WriteVLSEG" # nf # "e" # eew,   [VCQ, VL], mx, IsWorstCase>;
+          defm : LMULWriteResMX<"WriteVLSEGFF" # nf # "e" # eew, [VCQ, VL], mx, IsWorstCase>;
+        }
+        let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
+        defm : LMULWriteResMX<"WriteVSSEG" # nf # "e" # eew,   [VCQ, VS], mx, IsWorstCase>;
       }
     }
   }
-}
-
-// 11. Vector Integer Arithmetic Instructions
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVIALUV",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIALUX",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIALUI",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVICALUV",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVICALUX",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVICALUI",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVICALUMV",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVICALUMX",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVICALUMI",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMinMaxV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMinMaxX",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMergeV",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMergeX",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMergeI",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMovV",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMovX",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMovI",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-  }
-  let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVShiftV",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVShiftX",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVShiftI",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMulV",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMulX",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMulAddV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIMulAddX",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-  }
-  // Mask results can't chain.
-  let Latency = !add(Cycles, 3), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVICmpV",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVICmpX",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVICmpI",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  foreach mx = SchedMxList in {
+    foreach nf=2-8 in {
+      foreach eew = [8, 16, 32, 64] in {
+        defvar Cycles = SiFive7GetCyclesSegmented<mx, eew, nf, VLEN>.c;
+        defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+        // Does not chain so set latency high
+        let Latency = !add(3, Cycles), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+          defm : LMULWriteResMX<"WriteVLSSEG" # nf # "e" # eew,  [VCQ, VL], mx, IsWorstCase>;
+          defm : LMULWriteResMX<"WriteVLUXSEG" # nf # "e" # eew, [VCQ, VL], mx, IsWorstCase>;
+          defm : LMULWriteResMX<"WriteVLOXSEG" # nf # "e" # eew, [VCQ, VL], mx, IsWorstCase>;
+        }
+        let Latency = 1, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+          defm : LMULWriteResMX<"WriteVSSSEG" # nf # "e" # eew,  [VCQ, VS], mx, IsWorstCase>;
+          defm : LMULWriteResMX<"WriteVSUXSEG" # nf # "e" # eew, [VCQ, VS], mx, IsWorstCase>;
+          defm : LMULWriteResMX<"WriteVSOXSEG" # nf # "e" # eew, [VCQ, VS], mx, IsWorstCase>;
+        }
+      }
+    }
   }
-}
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVExtV",      [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+
+  // 11. Vector Integer Arithmetic Instructions
+  foreach mx = SchedMxList in {
+    defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVIALUV",     [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIALUX",     [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIALUI",     [VCQ, VA], mx, IsWorstCase>;
+      // vmadc requires mask
+      defm : LMULWriteResMX<"WriteVICALUV",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVICALUX",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVICALUI",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVICALUMV",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVICALUMX",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVICALUMI",   [VCQ, VA], mx, IsWorstCase>;
+      // min max require merge
+      defm : LMULWriteResMX<"WriteVIMinMaxV",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMinMaxX",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMergeV",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMergeX",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMergeI",   [VCQ, VA], mx, IsWorstCase>;
+
+      defm : LMULWriteResMX<"WriteVIMovV",     [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMovX",     [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMovI",     [VCQ, VA], mx, IsWorstCase>;
+
+      defm : LMULWriteResMX<"WriteVExtV",      [VCQ, VA], mx, IsWorstCase>;
+    }
+    let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVShiftV",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVShiftX",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVShiftI",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMulV",     [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMulX",     [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMulAddV",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIMulAddX",  [VCQ, VA], mx, IsWorstCase>;
+    }
+    // Mask results can't chain.
+    let Latency = !add(Cycles, 3), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVICmpV",     [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVICmpX",     [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVICmpI",     [VCQ, VA], mx, IsWorstCase>;
+    }
   }
-}
-foreach mx = SchedMxList in {
-  foreach sew = SchedSEWSet<mx>.val in {
-    defvar Cycles = !mul(SiFive7GetDivOrSqrtFactor<sew>.c,
-                         !div(SiFive7GetCyclesOnePerElement<mx, sew, SiFive7VLEN>.c, 4));
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
-    let Latency = Cycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-      defm "" : LMULSEWWriteResMXSEW<"WriteVIDivV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVIDivX", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
+
+  foreach mx = SchedMxList in {
+    foreach sew = SchedSEWSet<mx>.val in {
+      defvar Cycles = !mul(SiFive7GetDivOrSqrtFactor<sew>.c,
+                           !div(SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c, 4));
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
+      let Latency = Cycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+        defm : LMULSEWWriteResMXSEW<"WriteVIDivV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVIDivX", [VCQ, VA], mx, sew, IsWorstCase>;
+      }
     }
   }
-}
 
-// Widening
-foreach mx = SchedMxListW in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListW>.c;
-  let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVIWALUV",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIWALUX",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIWALUI",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIWMulV",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIWMulX",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIWMulAddV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIWMulAddX", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  // Widening
+  foreach mx = SchedMxListW in {
+    defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListW>.c;
+    let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVIWALUV",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIWALUX",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIWALUI",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIWMulV",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIWMulX",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIWMulAddV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIWMulAddX", [VCQ, VA], mx, IsWorstCase>;
+    }
   }
-}
-// Narrowing
-foreach mx = SchedMxListW in {
-  defvar Cycles = SiFive7GetCyclesNarrowing<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListW>.c;
-  let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVNShiftV",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVNShiftX",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVNShiftI",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  // Narrowing
+  foreach mx = SchedMxListW in {
+    defvar Cycles = SiFive7GetCyclesNarrowing<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListW>.c;
+    let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVNShiftV",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVNShiftX",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVNShiftI",   [VCQ, VA], mx, IsWorstCase>;
+    }
   }
-}
 
-// 12. Vector Fixed-Point Arithmetic Instructions
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVSALUV",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSALUX",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSALUI",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVAALUV",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVAALUX",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSMulV",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSMulX",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSShiftV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSShiftX", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSShiftI", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  // 12. Vector Fixed-Point Arithmetic Instructions
+  foreach mx = SchedMxList in {
+    defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVSALUV",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSALUX",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSALUI",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVAALUV",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVAALUX",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSMulV",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSMulX",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSShiftV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSShiftX", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSShiftI", [VCQ, VA], mx, IsWorstCase>;
+    }
   }
-}
-// Narrowing
-foreach mx = SchedMxListW in {
-  defvar Cycles = SiFive7GetCyclesNarrowing<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListW>.c;
-  let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVNClipV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVNClipX",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVNClipI",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  // Narrowing
+  foreach mx = SchedMxListW in {
+    defvar Cycles = SiFive7GetCyclesNarrowing<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListW>.c;
+    let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVNClipV",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVNClipX",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVNClipI",  [VCQ, VA], mx, IsWorstCase>;
+    }
   }
-}
 
-// 13. Vector Floating-Point Instructions
-foreach mx = SchedMxListF in {
-  foreach sew = SchedSEWSet<mx, isF=1>.val in {
+  // 13. Vector Floating-Point Instructions
+  foreach mx = SchedMxListF in {
+    foreach sew = SchedSEWSet<mx, isF=1>.val in {
+      defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListF, isF=1>.c;
+      let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+        defm : LMULSEWWriteResMXSEW<"WriteVFALUV",  [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFALUF",  [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFMulV",  [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFMulF",  [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFMulAddV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFMulAddF", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFRecpV",   [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFCvtIToFV", [VCQ, VA], mx, sew, IsWorstCase>;
+      }
+      let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+        defm : LMULSEWWriteResMXSEW<"WriteVFSgnjV",   [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFSgnjF",   [VCQ, VA], mx, sew, IsWorstCase>;
+        // min max require merge
+        defm : LMULSEWWriteResMXSEW<"WriteVFMinMaxV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFMinMaxF", [VCQ, VA], mx, sew, IsWorstCase>;
+      }
+    }
+  }
+  foreach mx = SchedMxList in {
     defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListF, isF=1>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
     let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFALUV",  [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFALUF",  [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFMulV",  [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFMulF",  [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFMulAddV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFMulAddF", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFRecpV",   [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFCvtIToFV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVFCvtFToIV",  [VCQ, VA], mx, IsWorstCase>;
     }
     let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFMinMaxV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFMinMaxF", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFSgnjV",   [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFSgnjF",   [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVFClassV",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVFMergeV",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVFMovV",      [VCQ, VA], mx, IsWorstCase>;
+    }
+    // Mask results can't chain.
+    let Latency = !add(Cycles, 3), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      // fcmp requires mask
+      defm : LMULWriteResMX<"WriteVFCmpV",      [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVFCmpF",      [VCQ, VA], mx, IsWorstCase>;
     }
   }
-}
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVFCvtFToIV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-  }
-  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVFClassV",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVFMergeV",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVFMovV",      [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-  }
-  // Mask results can't chain.
-  let Latency = !add(Cycles, 3), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVFCmpV",      [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVFCmpF",      [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-  }
-}
-foreach mx = SchedMxListF in {
-  foreach sew = SchedSEWSet<mx, isF=1>.val in {
-    defvar Cycles = !mul(SiFive7GetDivOrSqrtFactor<sew>.c,
-                         !div(SiFive7GetCyclesOnePerElement<mx, sew, SiFive7VLEN>.c, 4));
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListF, 1>.c;
-    let Latency = Cycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFSqrtV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFDivV",  [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFDivF",  [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
+  foreach mx = SchedMxListF in {
+    foreach sew = SchedSEWSet<mx, isF=1>.val in {
+      defvar Cycles = !mul(SiFive7GetDivOrSqrtFactor<sew>.c,
+                           !div(SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c, 4));
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListF, 1>.c;
+      let Latency = Cycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+        defm : LMULSEWWriteResMXSEW<"WriteVFSqrtV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFDivV",  [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFDivF",  [VCQ, VA], mx, sew, IsWorstCase>;
+      }
     }
   }
-}
 
-// Widening
-foreach mx = SchedMxListW in {
-  foreach sew = SchedSEWSet<mx, isF=0, isWidening=1>.val in {
+  // Widening
+  foreach mx = SchedMxListW in {
+    foreach sew = SchedSEWSet<mx, isF=0, isWidening=1>.val in {
+      defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListW>.c;
+      let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
+      defm : LMULSEWWriteResMXSEW<"WriteVFWCvtIToFV", [VCQ, VA], mx, sew, IsWorstCase>;
+    }
+  }
+  foreach mx = SchedMxListFW in {
+    foreach sew = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
+      defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListFW, isF=1>.c;
+      let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+        defm : LMULSEWWriteResMXSEW<"WriteVFWALUV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFWALUF", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFWMulV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFWMulF", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFWMulAddV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFWMulAddF", [VCQ, VA], mx, sew, IsWorstCase>;
+      }
+      let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
+      defm : LMULSEWWriteResMXSEW<"WriteVFWCvtFToFV", [VCQ, VA], mx, sew, IsWorstCase>;
+    }
     defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListW>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListFW>.c;
     let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
-    defm "" : LMULSEWWriteResMXSEW<"WriteVFWCvtIToFV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
+    defm : LMULWriteResMX<"WriteVFWCvtFToIV", [VCQ, VA], mx, IsWorstCase>;
   }
-}
-foreach mx = SchedMxListFW in {
-  foreach sew = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
-    defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListFW, isF=1>.c;
+  // Narrowing
+  foreach mx = SchedMxListW in {
+    defvar Cycles = SiFive7GetCyclesNarrowing<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListW>.c;
     let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFWALUV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFWALUF", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFWMulV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFWMulF", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFWMulAddV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFWMulAddF", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFWCvtFToFV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVFNCvtFToIV", [VCQ, VA], mx, IsWorstCase>;
     }
   }
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListFW>.c;
-  let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
-  defm "" : LMULWriteResMX<"WriteVFWCvtFToIV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-}
-// Narrowing
-foreach mx = SchedMxListW in {
-  defvar Cycles = SiFive7GetCyclesNarrowing<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxListW>.c;
-  let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVFNCvtFToIV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  foreach mx = SchedMxListFW in {
+    foreach sew = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
+      defvar Cycles = SiFive7GetCyclesNarrowing<mx>.c;
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListFW, isF=1>.c;
+      let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+        defm : LMULSEWWriteResMXSEW<"WriteVFNCvtIToFV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFNCvtFToFV", [VCQ, VA], mx, sew, IsWorstCase>;
+      }
+    }
   }
-}
-foreach mx = SchedMxListFW in {
-  foreach sew = SchedSEWSet<mx, isF=1, isWidening=1>.val in {
-    defvar Cycles = SiFive7GetCyclesNarrowing<mx>.c;
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListFW, isF=1>.c;
-    let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFNCvtIToFV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFNCvtFToFV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
+
+  // 14. Vector Reduction Operations
+  foreach mx = SchedMxList in {
+    foreach sew = SchedSEWSet<mx>.val in {
+      defvar Cycles = SiFive7GetReductionCycles<mx, sew, VLEN>.c;
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
+      let Latency = Cycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+        defm : LMULSEWWriteResMXSEW<"WriteVIRedV_From", [VCQ, VA],
+                                       mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVIRedMinMaxV_From", [VCQ, VA],
+                                       mx, sew, IsWorstCase>;
+      }
     }
   }
-}
 
-// 14. Vector Reduction Operations
-foreach mx = SchedMxList in {
-  foreach sew = SchedSEWSet<mx>.val in {
-    defvar Cycles = SiFive7GetReductionCycles<mx, sew, SiFive7VLEN>.c;
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
-    let Latency = Cycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-      defm "" : LMULSEWWriteResMXSEW<"WriteVIRedV_From", [SiFive7VCQ, SiFive7VA],
-                                     mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVIRedMinMaxV_From", [SiFive7VCQ, SiFive7VA],
+  foreach mx = SchedMxListWRed in {
+    foreach sew = SchedSEWSet<mx, 0, 1>.val in {
+      defvar Cycles = SiFive7GetReductionCycles<mx, sew, VLEN>.c;
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListWRed>.c;
+      let Latency = Cycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
+      defm : LMULSEWWriteResMXSEW<"WriteVIWRedV_From", [VCQ, VA],
                                      mx, sew, IsWorstCase>;
     }
   }
-}
 
-foreach mx = SchedMxListWRed in {
-  foreach sew = SchedSEWSet<mx, 0, 1>.val in {
-    defvar Cycles = SiFive7GetReductionCycles<mx, sew, SiFive7VLEN>.c;
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListWRed>.c;
-    let Latency = Cycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in
-    defm "" : LMULSEWWriteResMXSEW<"WriteVIWRedV_From", [SiFive7VCQ, SiFive7VA],
-                                   mx, sew, IsWorstCase>;
+  foreach mx = SchedMxListF in {
+    foreach sew = SchedSEWSet<mx, 1>.val in {
+      defvar RedCycles = SiFive7GetReductionCycles<mx, sew, VLEN>.c;
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListF, 1>.c;
+      let Latency = RedCycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, RedCycles)] in {
+        defm : LMULSEWWriteResMXSEW<"WriteVFRedV_From", [VCQ, VA],
+                                       mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVFRedMinMaxV_From", [VCQ, VA],
+                                       mx, sew, IsWorstCase>;
+      }
+      defvar OrdRedCycles = SiFive7GetOrderedReductionCycles<mx, sew, VLEN>.c;
+      let Latency = OrdRedCycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, OrdRedCycles)] in
+      defm : LMULSEWWriteResMXSEW<"WriteVFRedOV_From", [VCQ, VA],
+                                     mx, sew, IsWorstCase>;
+    }
   }
-}
 
-foreach mx = SchedMxListF in {
-  foreach sew = SchedSEWSet<mx, 1>.val in {
-    defvar RedCycles = SiFive7GetReductionCycles<mx, sew, SiFive7VLEN>.c;
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListF, 1>.c;
-    let Latency = RedCycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, RedCycles)] in {
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFRedV_From", [SiFive7VCQ, SiFive7VA],
+  foreach mx = SchedMxListFWRed in {
+    foreach sew = SchedSEWSet<mx, 1, 1>.val in {
+      defvar RedCycles = SiFive7GetReductionCycles<mx, sew, VLEN>.c;
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListFWRed, 1>.c;
+      let Latency = RedCycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, RedCycles)] in
+      defm : LMULSEWWriteResMXSEW<"WriteVFWRedV_From", [VCQ, VA],
                                      mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVFRedMinMaxV_From", [SiFive7VCQ, SiFive7VA],
+      defvar OrdRedCycles = SiFive7GetOrderedReductionCycles<mx, sew, VLEN>.c;
+      let Latency = OrdRedCycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, OrdRedCycles)] in
+      defm : LMULSEWWriteResMXSEW<"WriteVFWRedOV_From", [VCQ, VA],
                                      mx, sew, IsWorstCase>;
     }
-    defvar OrdRedCycles = SiFive7GetOrderedReductionCycles<mx, sew, SiFive7VLEN>.c;
-    let Latency = OrdRedCycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, OrdRedCycles)] in
-    defm "" : LMULSEWWriteResMXSEW<"WriteVFRedOV_From", [SiFive7VCQ, SiFive7VA],
-                                   mx, sew, IsWorstCase>;
-  }
-}
-
-foreach mx = SchedMxListFWRed in {
-  foreach sew = SchedSEWSet<mx, 1, 1>.val in {
-    defvar RedCycles = SiFive7GetReductionCycles<mx, sew, SiFive7VLEN>.c;
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxListFWRed, 1>.c;
-    let Latency = RedCycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, RedCycles)] in
-    defm "" : LMULSEWWriteResMXSEW<"WriteVFWRedV_From", [SiFive7VCQ, SiFive7VA],
-                                   mx, sew, IsWorstCase>;
-    defvar OrdRedCycles = SiFive7GetOrderedReductionCycles<mx, sew, SiFive7VLEN>.c;
-    let Latency = OrdRedCycles, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, OrdRedCycles)] in
-    defm "" : LMULSEWWriteResMXSEW<"WriteVFWRedOV_From", [SiFive7VCQ, SiFive7VA],
-                                   mx, sew, IsWorstCase>;
   }
-}
 
-// 15. Vector Mask Instructions
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesVMask<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVMALUV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVMPopV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVMFFSV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVMSFSV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  // 15. Vector Mask Instructions
+  foreach mx = SchedMxList in {
+    defvar Cycles = SiFive7GetCyclesVMask<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVMALUV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVMPopV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVMFFSV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVMSFSV", [VCQ, VA], mx, IsWorstCase>;
+    }
   }
-}
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVIotaV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVIdxV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  foreach mx = SchedMxList in {
+    defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVIotaV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVIdxV", [VCQ, VA], mx, IsWorstCase>;
+    }
   }
-}
 
-// 16. Vector Permutation Instructions
-let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 1)] in {
-  def : WriteRes<WriteVMovSX, [SiFive7VCQ, SiFive7VA]>;
-  def : WriteRes<WriteVMovXS, [SiFive7VCQ, SiFive7VA]>;
-  def : WriteRes<WriteVMovSF, [SiFive7VCQ, SiFive7VA]>;
-  def : WriteRes<WriteVMovFS, [SiFive7VCQ, SiFive7VA]>;
-}
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVRGatherVX",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVRGatherVI",    [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  // 16. Vector Permutation Instructions
+  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 1)] in {
+    def : WriteRes<WriteVMovSX, [VCQ, VA]>;
+    def : WriteRes<WriteVMovXS, [VCQ, VA]>;
+    def : WriteRes<WriteVMovSF, [VCQ, VA]>;
+    def : WriteRes<WriteVMovFS, [VCQ, VA]>;
   }
-}
-
-foreach mx = SchedMxList in {
-  foreach sew = SchedSEWSet<mx>.val in {
-    defvar Cycles = SiFive7GetCyclesOnePerElement<mx, sew, SiFive7VLEN>.c;
-    defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
-    let Latency = !add(Cycles, 3), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-      defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
-      defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [SiFive7VCQ, SiFive7VA], mx, sew, IsWorstCase>;
+  foreach mx = SchedMxList in {
+    defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    let Latency = 8, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVRGatherVX",    [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVRGatherVI",    [VCQ, VA], mx, IsWorstCase>;
     }
   }
-}
 
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVSlideUpX",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVSlideI",     [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVISlide1X",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVFSlide1F",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  foreach mx = SchedMxList in {
+    foreach sew = SchedSEWSet<mx>.val in {
+      defvar Cycles = SiFive7GetCyclesOnePerElement<mx, sew, VLEN>.c;
+      defvar IsWorstCase = SiFive7IsWorstCaseMXSEW<mx, sew, SchedMxList>.c;
+      let Latency = !add(Cycles, 3), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+        defm : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [VCQ, VA], mx, sew, IsWorstCase>;
+        defm : LMULSEWWriteResMXSEW<"WriteVCompressV", [VCQ, VA], mx, sew, IsWorstCase>;
+      }
+    }
   }
-}
 
-// VMov*V is LMUL Aware
-let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 2)] in
-  def : WriteRes<WriteVMov1V,     [SiFive7VCQ, SiFive7VA]>;
-let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 4)] in
-  def : WriteRes<WriteVMov2V,     [SiFive7VCQ, SiFive7VA]>;
-let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 8)] in
-  def : WriteRes<WriteVMov4V,     [SiFive7VCQ, SiFive7VA]>;
-let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 16)] in
-  def : WriteRes<WriteVMov8V,     [SiFive7VCQ, SiFive7VA]>;
-
-// Others
-def : WriteRes<WriteCSR, [SiFive7PipeB]>;
-def : WriteRes<WriteNop, []>;
-let Latency = 3 in
-  def : WriteRes<WriteRdVLENB, [SiFive7PipeB]>;
-
-def : InstRW<[WriteIALU], (instrs COPY)>;
-
-// VCIX
-//
-// In principle we don't know the latency of any VCIX instructions (they
-// depends on a particular coprocessor implementation). However, the default
-// latency of 1 can lead to issues [1]. So instead we set the latency to the
-// default provided by `SiFive7GetCyclesDefault`. This is still not accurate
-// and can lead to suboptimal codegen, but should hopefully be a better
-// starting point.
-//
-// [1] https://github.com/llvm/llvm-project/issues/83391
-foreach mx = SchedMxList in {
-  defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
-  defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
-  let Latency = Cycles,
-      AcquireAtCycles = [0, 1],
-      ReleaseAtCycles = [1, !add(1, Cycles)] in {
-    defm "" : LMULWriteResMX<"WriteVC_V_I",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_X",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_IV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_VV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_XV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_IVV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_IVW", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_VVV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_VVW", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_XVV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_V_XVW", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    foreach f = ["FPR16", "FPR32", "FPR64"] in {
-      defm "" : LMULWriteResMX<"WriteVC_V_" # f # "V",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-      defm "" : LMULWriteResMX<"WriteVC_V_" # f # "VV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-      defm "" : LMULWriteResMX<"WriteVC_V_" # f # "VW", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  foreach mx = SchedMxList in {
+    defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVSlideUpX",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSlideDownX", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVSlideI",     [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVISlide1X",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVFSlide1F",   [VCQ, VA], mx, IsWorstCase>;
     }
-    defm "" : LMULWriteResMX<"WriteVC_I",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_X",   [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_IV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_VV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_XV",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_IVV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_IVW", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_VVV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_VVW", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_XVV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    defm "" : LMULWriteResMX<"WriteVC_XVW", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-    foreach f = ["FPR16", "FPR32", "FPR64"] in {
-      defm "" : LMULWriteResMX<"WriteVC_" # f # "V",  [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-      defm "" : LMULWriteResMX<"WriteVC_" # f # "VV", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
-      defm "" : LMULWriteResMX<"WriteVC_" # f # "VW", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+  }
+
+  // VMov*V is LMUL Aware
+  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 2)] in
+    def : WriteRes<WriteVMov1V,     [VCQ, VA]>;
+  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 4)] in
+    def : WriteRes<WriteVMov2V,     [VCQ, VA]>;
+  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 8)] in
+    def : WriteRes<WriteVMov4V,     [VCQ, VA]>;
+  let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 16)] in
+    def : WriteRes<WriteVMov8V,     [VCQ, VA]>;
+
+  // Others
+  def : WriteRes<WriteCSR, [PipeB]>;
+  def : WriteRes<WriteNop, []>;
+  let Latency = 3 in
+    def : WriteRes<WriteRdVLENB, [PipeB]>;
+
+  def : InstRW<[WriteIALU], (instrs COPY)>;
+
+  // VCIX
+  //
+  // In principle we don't know the latency of any VCIX instructions (they
+  // depend on a particular coprocessor implementation). However, the default
+  // latency of 1 can lead to issues [1]. So instead we set the latency to the
+  // default provided by `SiFive7GetCyclesDefault`. This is still not accurate
+  // and can lead to suboptimal codegen, but should hopefully be a better
+  // starting point.
+  //
+  // [1] https://github.com/llvm/llvm-project/issues/83391
+  foreach mx = SchedMxList in {
+    defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
+    defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
+    let Latency = Cycles,
+        AcquireAtCycles = [0, 1],
+        ReleaseAtCycles = [1, !add(1, Cycles)] in {
+      defm : LMULWriteResMX<"WriteVC_V_I",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_X",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_IV",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_VV",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_XV",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_IVV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_IVW", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_VVV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_VVW", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_XVV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_V_XVW", [VCQ, VA], mx, IsWorstCase>;
+      foreach f = ["FPR16", "FPR32", "FPR64"] in {
+        defm : LMULWriteResMX<"WriteVC_V_" # f # "V",  [VCQ, VA], mx, IsWorstCase>;
+        defm : LMULWriteResMX<"WriteVC_V_" # f # "VV", [VCQ, VA], mx, IsWorstCase>;
+        defm : LMULWriteResMX<"WriteVC_V_" # f # "VW", [VCQ, VA], mx, IsWorstCase>;
+      }
+      defm : LMULWriteResMX<"WriteVC_I",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_X",   [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_IV",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_VV",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_XV",  [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_IVV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_IVW", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_VVV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_VVW", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_XVV", [VCQ, VA], mx, IsWorstCase>;
+      defm : LMULWriteResMX<"WriteVC_XVW", [VCQ, VA], mx, IsWorstCase>;
+      foreach f = ["FPR16", "FPR32", "FPR64"] in {
+        defm : LMULWriteResMX<"WriteVC_" # f # "V",  [VCQ, VA], mx, IsWorstCase>;
+        defm : LMULWriteResMX<"WriteVC_" # f # "VV", [VCQ, VA], mx, IsWorstCase>;
+        defm : LMULWriteResMX<"WriteVC_" # f # "VW", [VCQ, VA], mx, IsWorstCase>;
+      }
     }
   }
 }
 
 //===----------------------------------------------------------------------===//
 
-// Bypass and advance
-def : SiFive7AnyToGPRBypass<ReadJmp>;
-def : SiFive7AnyToGPRBypass<ReadJalr>;
-def : ReadAdvance<ReadCSR, 0>;
-def : SiFive7AnyToGPRBypass<ReadStoreData>;
-def : ReadAdvance<ReadMemBase, 0>;
-def : SiFive7AnyToGPRBypass<ReadIALU>;
-def : SiFive7AnyToGPRBypass<ReadIALU32>;
-def : SiFive7AnyToGPRBypass<ReadShiftImm>;
-def : SiFive7AnyToGPRBypass<ReadShiftImm32>;
-def : SiFive7AnyToGPRBypass<ReadShiftReg>;
-def : SiFive7AnyToGPRBypass<ReadShiftReg32>;
-def : ReadAdvance<ReadIDiv, 0>;
-def : ReadAdvance<ReadIDiv32, 0>;
-def : ReadAdvance<ReadIRem, 0>;
-def : ReadAdvance<ReadIRem32, 0>;
-def : ReadAdvance<ReadIMul, 0>;
-def : ReadAdvance<ReadIMul32, 0>;
-def : ReadAdvance<ReadAtomicWA, 0>;
-def : ReadAdvance<ReadAtomicWD, 0>;
-def : ReadAdvance<ReadAtomicDA, 0>;
-def : ReadAdvance<ReadAtomicDD, 0>;
-def : ReadAdvance<ReadAtomicLDW, 0>;
-def : ReadAdvance<ReadAtomicLDD, 0>;
-def : ReadAdvance<ReadAtomicSTW, 0>;
-def : ReadAdvance<ReadAtomicSTD, 0>;
-def : ReadAdvance<ReadFStoreData, 0>;
-def : ReadAdvance<ReadFMemBase, 0>;
-def : ReadAdvance<ReadFAdd16, 0>;
-def : ReadAdvance<ReadFAdd32, 0>;
-def : ReadAdvance<ReadFAdd64, 0>;
-def : ReadAdvance<ReadFMul16, 0>;
-def : ReadAdvance<ReadFMA16, 0>;
-def : ReadAdvance<ReadFMA16Addend, 0>;
-def : ReadAdvance<ReadFMul32, 0>;
-def : ReadAdvance<ReadFMul64, 0>;
-def : ReadAdvance<ReadFMA32, 0>;
-def : ReadAdvance<ReadFMA32Addend, 0>;
-def : ReadAdvance<ReadFMA64, 0>;
-def : ReadAdvance<ReadFMA64Addend, 0>;
-def : ReadAdvance<ReadFDiv16, 0>;
-def : ReadAdvance<ReadFDiv32, 0>;
-def : ReadAdvance<ReadFDiv64, 0>;
-def : ReadAdvance<ReadFSqrt16, 0>;
-def : ReadAdvance<ReadFSqrt32, 0>;
-def : ReadAdvance<ReadFSqrt64, 0>;
-def : ReadAdvance<ReadFCmp16, 0>;
-def : ReadAdvance<ReadFCmp32, 0>;
-def : ReadAdvance<ReadFCmp64, 0>;
-def : ReadAdvance<ReadFSGNJ16, 0>;
-def : ReadAdvance<ReadFSGNJ32, 0>;
-def : ReadAdvance<ReadFSGNJ64, 0>;
-def : ReadAdvance<ReadFMinMax16, 0>;
-def : ReadAdvance<ReadFMinMax32, 0>;
-def : ReadAdvance<ReadFMinMax64, 0>;
-def : ReadAdvance<ReadFCvtF16ToI32, 0>;
-def : ReadAdvance<ReadFCvtF16ToI64, 0>;
-def : ReadAdvance<ReadFCvtF32ToI32, 0>;
-def : ReadAdvance<ReadFCvtF32ToI64, 0>;
-def : ReadAdvance<ReadFCvtF64ToI32, 0>;
-def : ReadAdvance<ReadFCvtF64ToI64, 0>;
-def : ReadAdvance<ReadFCvtI32ToF16, 0>;
-def : ReadAdvance<ReadFCvtI32ToF32, 0>;
-def : ReadAdvance<ReadFCvtI32ToF64, 0>;
-def : ReadAdvance<ReadFCvtI64ToF16, 0>;
-def : ReadAdvance<ReadFCvtI64ToF32, 0>;
-def : ReadAdvance<ReadFCvtI64ToF64, 0>;
-def : ReadAdvance<ReadFCvtF32ToF64, 0>;
-def : ReadAdvance<ReadFCvtF64ToF32, 0>;
-def : ReadAdvance<ReadFCvtF16ToF32, 0>;
-def : ReadAdvance<ReadFCvtF32ToF16, 0>;
-def : ReadAdvance<ReadFCvtF16ToF64, 0>;
-def : ReadAdvance<ReadFCvtF64ToF16, 0>;
-def : ReadAdvance<ReadFMovF16ToI16, 0>;
-def : ReadAdvance<ReadFMovI16ToF16, 0>;
-def : ReadAdvance<ReadFMovF32ToI32, 0>;
-def : ReadAdvance<ReadFMovI32ToF32, 0>;
-def : ReadAdvance<ReadFMovF64ToI64, 0>;
-def : ReadAdvance<ReadFMovI64ToF64, 0>;
-def : ReadAdvance<ReadFClass16, 0>;
-def : ReadAdvance<ReadFClass32, 0>;
-def : ReadAdvance<ReadFClass64, 0>;
-
-def : SiFive7AnyToGPRBypass<ReadSFBJmp, 0>;
-def : SiFive7AnyToGPRBypass<ReadSFBALU, 0>;
-
-// Bitmanip
-def : SiFive7AnyToGPRBypass<ReadRotateImm>;
-def : SiFive7AnyToGPRBypass<ReadRotateImm32>;
-def : SiFive7AnyToGPRBypass<ReadRotateReg>;
-def : SiFive7AnyToGPRBypass<ReadRotateReg32>;
-def : SiFive7AnyToGPRBypass<ReadCLZ>;
-def : SiFive7AnyToGPRBypass<ReadCLZ32>;
-def : SiFive7AnyToGPRBypass<ReadCTZ>;
-def : SiFive7AnyToGPRBypass<ReadCTZ32>;
-def : ReadAdvance<ReadCPOP, 0>;
-def : ReadAdvance<ReadCPOP32, 0>;
-def : SiFive7AnyToGPRBypass<ReadORCB>;
-def : SiFive7AnyToGPRBypass<ReadIMinMax>;
-def : SiFive7AnyToGPRBypass<ReadREV8>;
-def : SiFive7AnyToGPRBypass<ReadSHXADD>;
-def : SiFive7AnyToGPRBypass<ReadSHXADD32>;
-// Single-bit instructions
-def : SiFive7AnyToGPRBypass<ReadSingleBit>;
-def : SiFive7AnyToGPRBypass<ReadSingleBitImm>;
-
-// 6. Configuration-Setting Instructions
-def : ReadAdvance<ReadVSETVLI, 2>;
-def : ReadAdvance<ReadVSETVL, 2>;
-
-// 7. Vector Loads and Stores
-def : ReadAdvance<ReadVLDX, 0>;
-def : ReadAdvance<ReadVSTX, 0>;
-defm "" : LMULReadAdvance<"ReadVSTEV", 0>;
-defm "" : LMULReadAdvance<"ReadVSTM", 0>;
-def : ReadAdvance<ReadVLDSX, 0>;
-def : ReadAdvance<ReadVSTSX, 0>;
-defm "" : LMULReadAdvance<"ReadVSTS8V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTS16V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTS32V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTS64V", 0>;
-defm "" : LMULReadAdvance<"ReadVLDUXV", 0>;
-defm "" : LMULReadAdvance<"ReadVLDOXV", 0>;
-defm "" : LMULReadAdvance<"ReadVSTUX8", 0>;
-defm "" : LMULReadAdvance<"ReadVSTUX16", 0>;
-defm "" : LMULReadAdvance<"ReadVSTUX32", 0>;
-defm "" : LMULReadAdvance<"ReadVSTUX64", 0>;
-defm "" : LMULReadAdvance<"ReadVSTUXV", 0>;
-defm "" : LMULReadAdvance<"ReadVSTUX8V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTUX16V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTUX32V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTUX64V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTOX8", 0>;
-defm "" : LMULReadAdvance<"ReadVSTOX16", 0>;
-defm "" : LMULReadAdvance<"ReadVSTOX32", 0>;
-defm "" : LMULReadAdvance<"ReadVSTOX64", 0>;
-defm "" : LMULReadAdvance<"ReadVSTOXV", 0>;
-defm "" : LMULReadAdvance<"ReadVSTOX8V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTOX16V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTOX32V", 0>;
-defm "" : LMULReadAdvance<"ReadVSTOX64V", 0>;
-// LMUL Aware
-def : ReadAdvance<ReadVST1R, 0>;
-def : ReadAdvance<ReadVST2R, 0>;
-def : ReadAdvance<ReadVST4R, 0>;
-def : ReadAdvance<ReadVST8R, 0>;
-
-// 12. Vector Integer Arithmetic Instructions
-defm : LMULReadAdvance<"ReadVIALUV", 0>;
-defm : LMULReadAdvance<"ReadVIALUX", 0>;
-defm : LMULReadAdvanceW<"ReadVIWALUV", 0>;
-defm : LMULReadAdvanceW<"ReadVIWALUX", 0>;
-defm : LMULReadAdvance<"ReadVExtV", 0>;
-defm : LMULReadAdvance<"ReadVICALUV", 0>;
-defm : LMULReadAdvance<"ReadVICALUX", 0>;
-defm : LMULReadAdvance<"ReadVShiftV", 0>;
-defm : LMULReadAdvance<"ReadVShiftX", 0>;
-defm : LMULReadAdvanceW<"ReadVNShiftV", 0>;
-defm : LMULReadAdvanceW<"ReadVNShiftX", 0>;
-defm : LMULReadAdvance<"ReadVICmpV", 0>;
-defm : LMULReadAdvance<"ReadVICmpX", 0>;
-defm : LMULReadAdvance<"ReadVIMinMaxV", 0>;
-defm : LMULReadAdvance<"ReadVIMinMaxX", 0>;
-defm : LMULReadAdvance<"ReadVIMulV", 0>;
-defm : LMULReadAdvance<"ReadVIMulX", 0>;
-defm : LMULSEWReadAdvance<"ReadVIDivV", 0>;
-defm : LMULSEWReadAdvance<"ReadVIDivX", 0>;
-defm : LMULReadAdvanceW<"ReadVIWMulV", 0>;
-defm : LMULReadAdvanceW<"ReadVIWMulX", 0>;
-defm : LMULReadAdvance<"ReadVIMulAddV", 0>;
-defm : LMULReadAdvance<"ReadVIMulAddX", 0>;
-defm : LMULReadAdvanceW<"ReadVIWMulAddV", 0>;
-defm : LMULReadAdvanceW<"ReadVIWMulAddX", 0>;
-defm : LMULReadAdvance<"ReadVIMergeV", 0>;
-defm : LMULReadAdvance<"ReadVIMergeX", 0>;
-defm : LMULReadAdvance<"ReadVIMovV", 0>;
-defm : LMULReadAdvance<"ReadVIMovX", 0>;
-
-// 13. Vector Fixed-Point Arithmetic Instructions
-defm "" : LMULReadAdvance<"ReadVSALUV", 0>;
-defm "" : LMULReadAdvance<"ReadVSALUX", 0>;
-defm "" : LMULReadAdvance<"ReadVAALUV", 0>;
-defm "" : LMULReadAdvance<"ReadVAALUX", 0>;
-defm "" : LMULReadAdvance<"ReadVSMulV", 0>;
-defm "" : LMULReadAdvance<"ReadVSMulX", 0>;
-defm "" : LMULReadAdvance<"ReadVSShiftV", 0>;
-defm "" : LMULReadAdvance<"ReadVSShiftX", 0>;
-defm "" : LMULReadAdvanceW<"ReadVNClipV", 0>;
-defm "" : LMULReadAdvanceW<"ReadVNClipX", 0>;
-
-// 14. Vector Floating-Point Instructions
-defm "" : LMULSEWReadAdvanceF<"ReadVFALUV", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFALUF", 0>;
-defm "" : LMULSEWReadAdvanceFW<"ReadVFWALUV", 0>;
-defm "" : LMULSEWReadAdvanceFW<"ReadVFWALUF", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFMulV", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFMulF", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFDivV", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFDivF", 0>;
-defm "" : LMULSEWReadAdvanceFW<"ReadVFWMulV", 0>;
-defm "" : LMULSEWReadAdvanceFW<"ReadVFWMulF", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFMulAddV", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFMulAddF", 0>;
-defm "" : LMULSEWReadAdvanceFW<"ReadVFWMulAddV", 0>;
-defm "" : LMULSEWReadAdvanceFW<"ReadVFWMulAddF", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFSqrtV", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFRecpV", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFMinMaxV", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFMinMaxF", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFSgnjV", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFSgnjF", 0>;
-defm "" : LMULReadAdvance<"ReadVFCmpV", 0>;
-defm "" : LMULReadAdvance<"ReadVFCmpF", 0>;
-defm "" : LMULReadAdvance<"ReadVFClassV", 0>;
-defm "" : LMULReadAdvance<"ReadVFMergeV", 0>;
-defm "" : LMULReadAdvance<"ReadVFMergeF", 0>;
-defm "" : LMULReadAdvance<"ReadVFMovF", 0>;
-defm "" : LMULSEWReadAdvanceF<"ReadVFCvtIToFV", 0>;
-defm "" : LMULReadAdvance<"ReadVFCvtFToIV", 0>;
-defm "" : LMULSEWReadAdvanceW<"ReadVFWCvtIToFV", 0>;
-defm "" : LMULReadAdvanceFW<"ReadVFWCvtFToIV", 0>;
-defm "" : LMULSEWReadAdvanceFW<"ReadVFWCvtFToFV", 0>;
-defm "" : LMULSEWReadAdvanceFW<"ReadVFNCvtIToFV", 0>;
-defm "" : LMULReadAdvanceW<"ReadVFNCvtFToIV", 0>;
-defm "" : LMULSEWReadAdvanceFW<"ReadVFNCvtFToFV", 0>;
-
-// 15. Vector Reduction Operations
-def : ReadAdvance<ReadVIRedV, 0>;
-def : ReadAdvance<ReadVIRedV0, 0>;
-def : ReadAdvance<ReadVIWRedV, 0>;
-def : ReadAdvance<ReadVIWRedV0, 0>;
-def : ReadAdvance<ReadVFRedV, 0>;
-def : ReadAdvance<ReadVFRedV0, 0>;
-def : ReadAdvance<ReadVFRedOV, 0>;
-def : ReadAdvance<ReadVFRedOV0, 0>;
-def : ReadAdvance<ReadVFWRedV, 0>;
-def : ReadAdvance<ReadVFWRedV0, 0>;
-def : ReadAdvance<ReadVFWRedOV, 0>;
-def : ReadAdvance<ReadVFWRedOV0, 0>;
-
-// 16. Vector Mask Instructions
-defm "" : LMULReadAdvance<"ReadVMALUV", 0>;
-defm "" : LMULReadAdvance<"ReadVMPopV", 0>;
-defm "" : LMULReadAdvance<"ReadVMFFSV", 0>;
-defm "" : LMULReadAdvance<"ReadVMSFSV", 0>;
-defm "" : LMULReadAdvance<"ReadVIotaV", 0>;
-
-// 17. Vector Permutation Instructions
-def : ReadAdvance<ReadVMovXS, 0>;
-def : ReadAdvance<ReadVMovSX_V, 0>;
-def : ReadAdvance<ReadVMovSX_X, 0>;
-def : ReadAdvance<ReadVMovFS, 0>;
-def : ReadAdvance<ReadVMovSF_V, 0>;
-def : ReadAdvance<ReadVMovSF_F, 0>;
-defm "" : LMULReadAdvance<"ReadVISlideV", 0>;
-defm "" : LMULReadAdvance<"ReadVISlideX", 0>;
-defm "" : LMULReadAdvance<"ReadVFSlideV", 0>;
-defm "" : LMULReadAdvance<"ReadVFSlideF", 0>;
-defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_data", 0>;
-defm "" : LMULSEWReadAdvance<"ReadVRGatherVV_index", 0>;
-defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_data", 0>;
-defm "" : LMULSEWReadAdvance<"ReadVRGatherEI16VV_index", 0>;
-defm "" : LMULReadAdvance<"ReadVRGatherVX_data", 0>;
-defm "" : LMULReadAdvance<"ReadVRGatherVX_index", 0>;
-defm "" : LMULReadAdvance<"ReadVRGatherVI_data", 0>;
-defm "" : LMULSEWReadAdvance<"ReadVCompressV", 0>;
-// LMUL Aware
-def : ReadAdvance<ReadVMov1V, 0>;
-def : ReadAdvance<ReadVMov2V, 0>;
-def : ReadAdvance<ReadVMov4V, 0>;
-def : ReadAdvance<ReadVMov8V, 0>;
-
-// Others
-def : ReadAdvance<ReadVMask, 0>;
-def : ReadAdvance<ReadVPassthru_WorstCase, 0>;
-foreach mx = SchedMxList in {
-  def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx), 0>;
-  foreach sew = SchedSEWSet<mx>.val in
-    def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx  # "_E" # sew), 0>;
+multiclass SiFive7ReadAdvance {
+  // Bypass and advance
+  def : SiFive7AnyToGPRBypass<ReadJmp>;
+  def : SiFive7AnyToGPRBypass<ReadJalr>;
+  def : ReadAdvance<ReadCSR, 0>;
+  def : SiFive7AnyToGPRBypass<ReadStoreData>;
+  def : ReadAdvance<ReadMemBase, 0>;
+  def : SiFive7AnyToGPRBypass<ReadIALU>;
+  def : SiFive7AnyToGPRBypass<ReadIALU32>;
+  def : SiFive7AnyToGPRBypass<ReadShiftImm>;
+  def : SiFive7AnyToGPRBypass<ReadShiftImm32>;
+  def : SiFive7AnyToGPRBypass<ReadShiftReg>;
+  def : SiFive7AnyToGPRBypass<ReadShiftReg32>;
+  def : ReadAdvance<ReadIDiv, 0>;
+  def : ReadAdvance<ReadIDiv32, 0>;
+  def : ReadAdvance<ReadIRem, 0>;
+  def : ReadAdvance<ReadIRem32, 0>;
+  def : ReadAdvance<ReadIMul, 0>;
+  def : ReadAdvance<ReadIMul32, 0>;
+  def : ReadAdvance<ReadAtomicWA, 0>;
+  def : ReadAdvance<ReadAtomicWD, 0>;
+  def : ReadAdvance<ReadAtomicDA, 0>;
+  def : ReadAdvance<ReadAtomicDD, 0>;
+  def : ReadAdvance<ReadAtomicLDW, 0>;
+  def : ReadAdvance<ReadAtomicLDD, 0>;
+  def : ReadAdvance<ReadAtomicSTW, 0>;
+  def : ReadAdvance<ReadAtomicSTD, 0>;
+  def : ReadAdvance<ReadFStoreData, 0>;
+  def : ReadAdvance<ReadFMemBase, 0>;
+  def : ReadAdvance<ReadFAdd16, 0>;
+  def : ReadAdvance<ReadFAdd32, 0>;
+  def : ReadAdvance<ReadFAdd64, 0>;
+  def : ReadAdvance<ReadFMul16, 0>;
+  def : ReadAdvance<ReadFMA16, 0>;
+  def : ReadAdvance<ReadFMA16Addend, 0>;
+  def : ReadAdvance<ReadFMul32, 0>;
+  def : ReadAdvance<ReadFMul64, 0>;
+  def : ReadAdvance<ReadFMA32, 0>;
+  def : ReadAdvance<ReadFMA32Addend, 0>;
+  def : ReadAdvance<ReadFMA64, 0>;
+  def : ReadAdvance<ReadFMA64Addend, 0>;
+  def : ReadAdvance<ReadFDiv16, 0>;
+  def : ReadAdvance<ReadFDiv32, 0>;
+  def : ReadAdvance<ReadFDiv64, 0>;
+  def : ReadAdvance<ReadFSqrt16, 0>;
+  def : ReadAdvance<ReadFSqrt32, 0>;
+  def : ReadAdvance<ReadFSqrt64, 0>;
+  def : ReadAdvance<ReadFCmp16, 0>;
+  def : ReadAdvance<ReadFCmp32, 0>;
+  def : ReadAdvance<ReadFCmp64, 0>;
+  def : ReadAdvance<ReadFSGNJ16, 0>;
+  def : ReadAdvance<ReadFSGNJ32, 0>;
+  def : ReadAdvance<ReadFSGNJ64, 0>;
+  def : ReadAdvance<ReadFMinMax16, 0>;
+  def : ReadAdvance<ReadFMinMax32, 0>;
+  def : ReadAdvance<ReadFMinMax64, 0>;
+  def : ReadAdvance<ReadFCvtF16ToI32, 0>;
+  def : ReadAdvance<ReadFCvtF16ToI64, 0>;
+  def : ReadAdvance<ReadFCvtF32ToI32, 0>;
+  def : ReadAdvance<ReadFCvtF32ToI64, 0>;
+  def : ReadAdvance<ReadFCvtF64ToI32, 0>;
+  def : ReadAdvance<ReadFCvtF64ToI64, 0>;
+  def : ReadAdvance<ReadFCvtI32ToF16, 0>;
+  def : ReadAdvance<ReadFCvtI32ToF32, 0>;
+  def : ReadAdvance<ReadFCvtI32ToF64, 0>;
+  def : ReadAdvance<ReadFCvtI64ToF16, 0>;
+  def : ReadAdvance<ReadFCvtI64ToF32, 0>;
+  def : ReadAdvance<ReadFCvtI64ToF64, 0>;
+  def : ReadAdvance<ReadFCvtF32ToF64, 0>;
+  def : ReadAdvance<ReadFCvtF64ToF32, 0>;
+  def : ReadAdvance<ReadFCvtF16ToF32, 0>;
+  def : ReadAdvance<ReadFCvtF32ToF16, 0>;
+  def : ReadAdvance<ReadFCvtF16ToF64, 0>;
+  def : ReadAdvance<ReadFCvtF64ToF16, 0>;
+  def : ReadAdvance<ReadFMovF16ToI16, 0>;
+  def : ReadAdvance<ReadFMovI16ToF16, 0>;
+  def : ReadAdvance<ReadFMovF32ToI32, 0>;
+  def : ReadAdvance<ReadFMovI32ToF32, 0>;
+  def : ReadAdvance<ReadFMovF64ToI64, 0>;
+  def : ReadAdvance<ReadFMovI64ToF64, 0>;
+  def : ReadAdvance<ReadFClass16, 0>;
+  def : ReadAdvance<ReadFClass32, 0>;
+  def : ReadAdvance<ReadFClass64, 0>;
+
+  def : SiFive7AnyToGPRBypass<ReadSFBJmp, 0>;
+  def : SiFive7AnyToGPRBypass<ReadSFBALU, 0>;
+
+  // Bitmanip
+  def : SiFive7AnyToGPRBypass<ReadRotateImm>;
+  def : SiFive7AnyToGPRBypass<ReadRotateImm32>;
+  def : SiFive7AnyToGPRBypass<ReadRotateReg>;
+  def : SiFive7AnyToGPRBypass<ReadRotateReg32>;
+  def : SiFive7AnyToGPRBypass<ReadCLZ>;
+  def : SiFive7AnyToGPRBypass<ReadCLZ32>;
+  def : SiFive7AnyToGPRBypass<ReadCTZ>;
+  def : SiFive7AnyToGPRBypass<ReadCTZ32>;
+  def : ReadAdvance<ReadCPOP, 0>;
+  def : ReadAdvance<ReadCPOP32, 0>;
+  def : SiFive7AnyToGPRBypass<ReadORCB>;
+  def : SiFive7AnyToGPRBypass<ReadIMinMax>;
+  def : SiFive7AnyToGPRBypass<ReadREV8>;
+  def : SiFive7AnyToGPRBypass<ReadSHXADD>;
+  def : SiFive7AnyToGPRBypass<ReadSHXADD32>;
+  // Single-bit instructions
+  def : SiFive7AnyToGPRBypass<ReadSingleBit>;
+  def : SiFive7AnyToGPRBypass<ReadSingleBitImm>;
+
+  // 6. Configuration-Setting Instructions
+  def : ReadAdvance<ReadVSETVLI, 2>;
+  def : ReadAdvance<ReadVSETVL, 2>;
+
+  // 7. Vector Loads and Stores
+  def : ReadAdvance<ReadVLDX, 0>;
+  def : ReadAdvance<ReadVSTX, 0>;
+  defm : LMULReadAdvance<"ReadVSTEV", 0>;
+  defm : LMULReadAdvance<"ReadVSTM", 0>;
+  def : ReadAdvance<ReadVLDSX, 0>;
+  def : ReadAdvance<ReadVSTSX, 0>;
+  defm : LMULReadAdvance<"ReadVSTS8V", 0>;
+  defm : LMULReadAdvance<"ReadVSTS16V", 0>;
+  defm : LMULReadAdvance<"ReadVSTS32V", 0>;
+  defm : LMULReadAdvance<"ReadVSTS64V", 0>;
+  defm : LMULReadAdvance<"ReadVLDUXV", 0>;
+  defm : LMULReadAdvance<"ReadVLDOXV", 0>;
+  defm : LMULReadAdvance<"ReadVSTUX8", 0>;
+  defm : LMULReadAdvance<"ReadVSTUX16", 0>;
+  defm : LMULReadAdvance<"ReadVSTUX32", 0>;
+  defm : LMULReadAdvance<"ReadVSTUX64", 0>;
+  defm : LMULReadAdvance<"ReadVSTUXV", 0>;
+  defm : LMULReadAdvance<"ReadVSTUX8V", 0>;
+  defm : LMULReadAdvance<"ReadVSTUX16V", 0>;
+  defm : LMULReadAdvance<"ReadVSTUX32V", 0>;
+  defm : LMULReadAdvance<"ReadVSTUX64V", 0>;
+  defm : LMULReadAdvance<"ReadVSTOX8", 0>;
+  defm : LMULReadAdvance<"ReadVSTOX16", 0>;
+  defm : LMULReadAdvance<"ReadVSTOX32", 0>;
+  defm : LMULReadAdvance<"ReadVSTOX64", 0>;
+  defm : LMULReadAdvance<"ReadVSTOXV", 0>;
+  defm : LMULReadAdvance<"ReadVSTOX8V", 0>;
+  defm : LMULReadAdvance<"ReadVSTOX16V", 0>;
+  defm : LMULReadAdvance<"ReadVSTOX32V", 0>;
+  defm : LMULReadAdvance<"ReadVSTOX64V", 0>;
+  // LMUL Aware
+  def : ReadAdvance<ReadVST1R, 0>;
+  def : ReadAdvance<ReadVST2R, 0>;
+  def : ReadAdvance<ReadVST4R, 0>;
+  def : ReadAdvance<ReadVST8R, 0>;
+
+  // 12. Vector Integer Arithmetic Instructions
+  defm : LMULReadAdvance<"ReadVIALUV", 0>;
+  defm : LMULReadAdvance<"ReadVIALUX", 0>;
+  defm : LMULReadAdvanceW<"ReadVIWALUV", 0>;
+  defm : LMULReadAdvanceW<"ReadVIWALUX", 0>;
+  defm : LMULReadAdvance<"ReadVExtV", 0>;
+  defm : LMULReadAdvance<"ReadVICALUV", 0>;
+  defm : LMULReadAdvance<"ReadVICALUX", 0>;
+  defm : LMULReadAdvance<"ReadVShiftV", 0>;
+  defm : LMULReadAdvance<"ReadVShiftX", 0>;
+  defm : LMULReadAdvanceW<"ReadVNShiftV", 0>;
+  defm : LMULReadAdvanceW<"ReadVNShiftX", 0>;
+  defm : LMULReadAdvance<"ReadVICmpV", 0>;
+  defm : LMULReadAdvance<"ReadVICmpX", 0>;
+  defm : LMULReadAdvance<"ReadVIMinMaxV", 0>;
+  defm : LMULReadAdvance<"ReadVIMinMaxX", 0>;
+  defm : LMULReadAdvance<"ReadVIMulV", 0>;
+  defm : LMULReadAdvance<"ReadVIMulX", 0>;
+  defm : LMULSEWReadAdvance<"ReadVIDivV", 0>;
+  defm : LMULSEWReadAdvance<"ReadVIDivX", 0>;
+  defm : LMULReadAdvanceW<"ReadVIWMulV", 0>;
+  defm : LMULReadAdvanceW<"ReadVIWMulX", 0>;
+  defm : LMULReadAdvance<"ReadVIMulAddV", 0>;
+  defm : LMULReadAdvance<"ReadVIMulAddX", 0>;
+  defm : LMULReadAdvanceW<"ReadVIWMulAddV", 0>;
+  defm : LMULReadAdvanceW<"ReadVIWMulAddX", 0>;
+  defm : LMULReadAdvance<"ReadVIMergeV", 0>;
+  defm : LMULReadAdvance<"ReadVIMergeX", 0>;
+  defm : LMULReadAdvance<"ReadVIMovV", 0>;
+  defm : LMULReadAdvance<"ReadVIMovX", 0>;
+
+  // 13. Vector Fixed-Point Arithmetic Instructions
+  defm : LMULReadAdvance<"ReadVSALUV", 0>;
+  defm : LMULReadAdvance<"ReadVSALUX", 0>;
+  defm : LMULReadAdvance<"ReadVAALUV", 0>;
+  defm : LMULReadAdvance<"ReadVAALUX", 0>;
+  defm : LMULReadAdvance<"ReadVSMulV", 0>;
+  defm : LMULReadAdvance<"ReadVSMulX", 0>;
+  defm : LMULReadAdvance<"ReadVSShiftV", 0>;
+  defm : LMULReadAdvance<"ReadVSShiftX", 0>;
+  defm : LMULReadAdvanceW<"ReadVNClipV", 0>;
+  defm : LMULReadAdvanceW<"ReadVNClipX", 0>;
+
+  // 14. Vector Floating-Point Instructions
+  defm : LMULSEWReadAdvanceF<"ReadVFALUV", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFALUF", 0>;
+  defm : LMULSEWReadAdvanceFW<"ReadVFWALUV", 0>;
+  defm : LMULSEWReadAdvanceFW<"ReadVFWALUF", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFMulV", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFMulF", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFDivV", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFDivF", 0>;
+  defm : LMULSEWReadAdvanceFW<"ReadVFWMulV", 0>;
+  defm : LMULSEWReadAdvanceFW<"ReadVFWMulF", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFMulAddV", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFMulAddF", 0>;
+  defm : LMULSEWReadAdvanceFW<"ReadVFWMulAddV", 0>;
+  defm : LMULSEWReadAdvanceFW<"ReadVFWMulAddF", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFSqrtV", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFRecpV", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFMinMaxV", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFMinMaxF", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFSgnjV", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFSgnjF", 0>;
+  defm : LMULReadAdvance<"ReadVFCmpV", 0>;
+  defm : LMULReadAdvance<"ReadVFCmpF", 0>;
+  defm : LMULReadAdvance<"ReadVFClassV", 0>;
+  defm : LMULReadAdvance<"ReadVFMergeV", 0>;
+  defm : LMULReadAdvance<"ReadVFMergeF", 0>;
+  defm : LMULReadAdvance<"ReadVFMovF", 0>;
+  defm : LMULSEWReadAdvanceF<"ReadVFCvtIToFV", 0>;
+  defm : LMULReadAdvance<"ReadVFCvtFToIV", 0>;
+  defm : LMULSEWReadAdvanceW<"ReadVFWCvtIToFV", 0>;
+  defm : LMULReadAdvanceFW<"ReadVFWCvtFToIV", 0>;
+  defm : LMULSEWReadAdvanceFW<"ReadVFWCvtFToFV", 0>;
+  defm : LMULSEWReadAdvanceFW<"ReadVFNCvtIToFV", 0>;
+  defm : LMULReadAdvanceW<"ReadVFNCvtFToIV", 0>;
+  defm : LMULSEWReadAdvanceFW<"ReadVFNCvtFToFV", 0>;
+
+  // 15. Vector Reduction Operations
+  def : ReadAdvance<ReadVIRedV, 0>;
+  def : ReadAdvance<ReadVIRedV0, 0>;
+  def : ReadAdvance<ReadVIWRedV, 0>;
+  def : ReadAdvance<ReadVIWRedV0, 0>;
+  def : ReadAdvance<ReadVFRedV, 0>;
+  def : ReadAdvance<ReadVFRedV0, 0>;
+  def : ReadAdvance<ReadVFRedOV, 0>;
+  def : ReadAdvance<ReadVFRedOV0, 0>;
+  def : ReadAdvance<ReadVFWRedV, 0>;
+  def : ReadAdvance<ReadVFWRedV0, 0>;
+  def : ReadAdvance<ReadVFWRedOV, 0>;
+  def : ReadAdvance<ReadVFWRedOV0, 0>;
+
+  // 16. Vector Mask Instructions
+  defm : LMULReadAdvance<"ReadVMALUV", 0>;
+  defm : LMULReadAdvance<"ReadVMPopV", 0>;
+  defm : LMULReadAdvance<"ReadVMFFSV", 0>;
+  defm : LMULReadAdvance<"ReadVMSFSV", 0>;
+  defm : LMULReadAdvance<"ReadVIotaV", 0>;
+
+  // 17. Vector Permutation Instructions
+  def : ReadAdvance<ReadVMovXS, 0>;
+  def : ReadAdvance<ReadVMovSX_V, 0>;
+  def : ReadAdvance<ReadVMovSX_X, 0>;
+  def : ReadAdvance<ReadVMovFS, 0>;
+  def : ReadAdvance<ReadVMovSF_V, 0>;
+  def : ReadAdvance<ReadVMovSF_F, 0>;
+  defm : LMULReadAdvance<"ReadVISlideV", 0>;
+  defm : LMULReadAdvance<"ReadVISlideX", 0>;
+  defm : LMULReadAdvance<"ReadVFSlideV", 0>;
+  defm : LMULReadAdvance<"ReadVFSlideF", 0>;
+  defm : LMULSEWReadAdvance<"ReadVRGatherVV_data", 0>;
+  defm : LMULSEWReadAdvance<"ReadVRGatherVV_index", 0>;
+  defm : LMULSEWReadAdvance<"ReadVRGatherEI16VV_data", 0>;
+  defm : LMULSEWReadAdvance<"ReadVRGatherEI16VV_index", 0>;
+  defm : LMULReadAdvance<"ReadVRGatherVX_data", 0>;
+  defm : LMULReadAdvance<"ReadVRGatherVX_index", 0>;
+  defm : LMULReadAdvance<"ReadVRGatherVI_data", 0>;
+  defm : LMULSEWReadAdvance<"ReadVCompressV", 0>;
+  // LMUL Aware
+  def : ReadAdvance<ReadVMov1V, 0>;
+  def : ReadAdvance<ReadVMov2V, 0>;
+  def : ReadAdvance<ReadVMov4V, 0>;
+  def : ReadAdvance<ReadVMov8V, 0>;
+
+  // Others
+  def : ReadAdvance<ReadVMask, 0>;
+  def : ReadAdvance<ReadVPassthru_WorstCase, 0>;
+  foreach mx = SchedMxList in {
+    def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx), 0>;
+    foreach sew = SchedSEWSet<mx>.val in
+      def : ReadAdvance<!cast<SchedRead>("ReadVPassthru_" # mx  # "_E" # sew), 0>;
+  }
 }
 
 //===----------------------------------------------------------------------===//
-// Unsupported extensions
-defm : UnsupportedSchedQ;
-defm : UnsupportedSchedZabha;
-defm : UnsupportedSchedZbc;
-defm : UnsupportedSchedZbkb;
-defm : UnsupportedSchedZbkx;
-defm : UnsupportedSchedZfa;
-defm : UnsupportedSchedZvk;
+
+/// This multiclass is a "bundle" of (1) processor resources (i.e. pipes) and
+/// (2) WriteRes entries. It's parameterized by config values that will
+/// eventually be supplied by different SchedMachineModels.
+multiclass SiFive7SchedResources<int vlen> {
+  defm SiFive7 : SiFive7ProcResources;
+
+  // Pull out defs from SiFive7ProcResources so we can refer to them by name.
+  defvar SiFive7PipeA = !cast<ProcResource>(NAME # SiFive7PipeA);
+  defvar SiFive7PipeB = !cast<ProcResource>(NAME # SiFive7PipeB);
+  defvar SiFive7PipeAB = !cast<ProcResGroup>(NAME # SiFive7PipeAB);
+  defvar SiFive7IDiv = !cast<ProcResource>(NAME # SiFive7IDiv);
+  defvar SiFive7FDiv = !cast<ProcResource>(NAME # SiFive7FDiv);
+  defvar SiFive7VA = !cast<ProcResource>(NAME # SiFive7VA);
+  defvar SiFive7VL = !cast<ProcResource>(NAME # SiFive7VL);
+  defvar SiFive7VS = !cast<ProcResource>(NAME # SiFive7VS);
+  defvar SiFive7VCQ = !cast<ProcResource>(NAME # SiFive7VCQ);
+
+  // Define WriteRes records that are the same across all SiFive7 derived
+  // SchedModels.
+  defm SiFive7
+      : SiFive7WriteResBase<vlen, SiFive7PipeA, SiFive7PipeB, SiFive7PipeAB,
+                            SiFive7IDiv, SiFive7FDiv,
+                            SiFive7VA, SiFive7VL, SiFive7VS,
+                            SiFive7VCQ>;
+
+  //===----------------------------------------------------------------------===//
+  // Bypass and advance
+
+  defm SiFive7 : SiFive7ReadAdvance;
+  //===----------------------------------------------------------------------===//
+  // Unsupported extensions
+  defm : UnsupportedSchedQ;
+  defm : UnsupportedSchedZabha;
+  defm : UnsupportedSchedZbc;
+  defm : UnsupportedSchedZbkb;
+  defm : UnsupportedSchedZbkx;
+  defm : UnsupportedSchedZfa;
+  defm : UnsupportedSchedZvk;
----------------
wangpc-pp wrote:

Maybe just a small future problem: if Zvk or other extensions are supported, we will need some sub-multiclasses (or to control this with arguments and `if` statements).
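
For illustration, one way that could look is an argument on the bundle multiclass that gates the fallback defm (a rough sketch only; the `hasZvk` parameter and its default are hypothetical, not part of this patch):

```
// Hypothetical: gate the "unsupported" defaults behind a template argument so
// a future model that implements Zvk can opt out of the fallback scheduling info.
multiclass SiFive7SchedResources<int vlen, bit hasZvk = 0> {
  // ... processor resources, WriteRes and ReadAdvance defs as above ...

  // Only models without Zvk mark it as unsupported.
  if !not(hasZvk) then {
    defm : UnsupportedSchedZvk;
  }
}
```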

https://github.com/llvm/llvm-project/pull/144442

