[llvm] 2b04532 - [RISCV] Add scheduling resources for vector segment instructions.
Monk Chiang via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 12 22:52:07 PDT 2022
Author: Monk Chiang
Date: 2022-07-12T22:51:58-07:00
New Revision: 2b045324b2ca3974b8acbcb2867dd39159a75ea3
URL: https://github.com/llvm/llvm-project/commit/2b045324b2ca3974b8acbcb2867dd39159a75ea3
DIFF: https://github.com/llvm/llvm-project/commit/2b045324b2ca3974b8acbcb2867dd39159a75ea3.diff
LOG: [RISCV] Add scheduling resources for vector segment instructions.
Add scheduling resources for vector segment instructions
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D128886
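
As a worked illustration (not part of the patch itself): the new Sched helper classes below only paste their nf and eew parameters into the matching SchedReadWrite names. For example, the instruction definition that the foreach loops generate for vlsseg4e32.v effectively expands to:

// Hand-expanded form of what the patch produces via VLSSEGSched<4, 32>;
// shown here only to illustrate how the helper classes resolve.
def VLSSEG4E32_V :
    VStridedSegmentLoad<3, LSWidth32, "vlsseg4e32.v">,
    Sched<[WriteVLSSEG4e32, ReadVLDX, ReadVLDSX, ReadVMask]>;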
Added:
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfoV.td
llvm/lib/Target/RISCV/RISCVScheduleV.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index d466e278cafce..1ad634344c09a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -115,6 +115,35 @@ class VSXSched<int n, string o> :
class VLFSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDFF" # n),
ReadVLDX, ReadVMask]>;
+// Unit-Stride Segment Loads and Stores
+class VLSEGSched<int nf, int eew> : Sched<[
+ !cast<SchedReadWrite>("WriteVLSEG" #nf #"e" #eew), ReadVLDX, ReadVMask]>;
+class VSSEGSched<int nf, int eew> : Sched<[
+ !cast<SchedReadWrite>("WriteVSSEG" #nf #"e" #eew),
+ !cast<SchedReadWrite>("ReadVSTE" #eew #"V"), ReadVSTX, ReadVMask]>;
+class VLSEGFFSched<int nf, int eew> : Sched<[
+ !cast<SchedReadWrite>("WriteVLSEGFF" #nf #"e" #eew), ReadVLDX, ReadVMask]>;
+// Strided Segment Loads and Stores
+class VLSSEGSched<int nf, int eew> : Sched<[
+ !cast<SchedReadWrite>("WriteVLSSEG" #nf #"e" #eew), ReadVLDX, ReadVLDSX,
+ ReadVMask]>;
+class VSSSEGSched<int nf, int eew> : Sched<[
+ !cast<SchedReadWrite>("WriteVSSSEG" #nf #"e" #eew),
+ !cast<SchedReadWrite>("ReadVSTS" #eew #"V"), ReadVSTX, ReadVSTSX, ReadVMask]>;
+// Indexed Segment Loads and Stores
+class VLUXSEGSched<int nf, int eew> : Sched<[
+ !cast<SchedReadWrite>("WriteVLUXSEG" #nf #"e" #eew), ReadVLDX, ReadVLDUXV,
+ ReadVMask]>;
+class VLOXSEGSched<int nf, int eew> : Sched<[
+ !cast<SchedReadWrite>("WriteVLOXSEG" #nf #"e" #eew), ReadVLDX, ReadVLDOXV,
+ ReadVMask]>;
+class VSUXSEGSched<int nf, int eew> : Sched<[
+ !cast<SchedReadWrite>("WriteVSUXSEG" #nf #"e" #eew),
+ !cast<SchedReadWrite>("ReadVSTUX" #eew), ReadVSTX, ReadVSTUXV, ReadVMask]>;
+class VSOXSEGSched<int nf, int eew> : Sched<[
+ !cast<SchedReadWrite>("WriteVSOXSEG" #nf #"e" #eew),
+ !cast<SchedReadWrite>("ReadVSTOX" #eew), ReadVSTX, ReadVSTOXV, ReadVMask]>;
+
//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//
@@ -1495,31 +1524,35 @@ let Predicates = [HasVInstructions] in {
defvar w = !cast<RISCVWidth>("LSWidth"#eew);
def VLSEG#nf#E#eew#_V :
- VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">;
+ VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
+ VLSEGSched<nf, eew>;
def VLSEG#nf#E#eew#FF_V :
- VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">;
+ VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
+ VLSEGFFSched<nf, eew>;
def VSSEG#nf#E#eew#_V :
- VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">;
-
+ VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
+ VSSEGSched<nf, eew>;
// Vector Strided Instructions
def VLSSEG#nf#E#eew#_V :
- VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">;
+ VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
+ VLSSEGSched<nf, eew>;
def VSSSEG#nf#E#eew#_V :
- VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">;
+ VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
+ VSSSEGSched<nf, eew>;
// Vector Indexed Instructions
def VLUXSEG#nf#EI#eew#_V :
VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
- "vluxseg"#nf#"ei"#eew#".v">;
+ "vluxseg"#nf#"ei"#eew#".v">, VLUXSEGSched<nf, eew>;
def VLOXSEG#nf#EI#eew#_V :
VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
- "vloxseg"#nf#"ei"#eew#".v">;
+ "vloxseg"#nf#"ei"#eew#".v">, VLOXSEGSched<nf, eew>;
def VSUXSEG#nf#EI#eew#_V :
VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
- "vsuxseg"#nf#"ei"#eew#".v">;
+ "vsuxseg"#nf#"ei"#eew#".v">, VSUXSEGSched<nf, eew>;
def VSOXSEG#nf#EI#eew#_V :
VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
- "vsoxseg"#nf#"ei"#eew#".v">;
+ "vsoxseg"#nf#"ei"#eew#".v">, VSOXSEGSched<nf, eew>;
}
}
} // Predicates = [HasVInstructions]
@@ -1528,17 +1561,22 @@ let Predicates = [HasVInstructionsI64] in {
foreach nf=2-8 in {
// Vector Unit-strided Segment Instructions
def VLSEG#nf#E64_V :
- VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">;
+ VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
+ VLSEGSched<nf, 64>;
def VLSEG#nf#E64FF_V :
- VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">;
+ VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
+ VLSEGFFSched<nf, 64>;
def VSSEG#nf#E64_V :
- VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">;
+ VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
+ VSSEGSched<nf, 64>;
// Vector Strided Segment Instructions
def VLSSEG#nf#E64_V :
- VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">;
+ VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
+ VLSSEGSched<nf, 64>;
def VSSSEG#nf#E64_V :
- VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">;
+ VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
+ VSSSEGSched<nf, 64>;
}
} // Predicates = [HasVInstructionsI64]
let Predicates = [HasVInstructionsI64, IsRV64] in {
@@ -1546,16 +1584,16 @@ let Predicates = [HasVInstructionsI64, IsRV64] in {
// Vector Indexed Segment Instructions
def VLUXSEG#nf#EI64_V :
VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
- "vluxseg"#nf#"ei64.v">;
+ "vluxseg"#nf#"ei64.v">, VLUXSEGSched<nf, 64>;
def VLOXSEG#nf#EI64_V :
VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
- "vloxseg"#nf#"ei64.v">;
+ "vloxseg"#nf#"ei64.v">, VLOXSEGSched<nf, 64>;
def VSUXSEG#nf#EI64_V :
VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
- "vsuxseg"#nf#"ei64.v">;
+ "vsuxseg"#nf#"ei64.v">, VSUXSEGSched<nf, 64>;
def VSOXSEG#nf#EI64_V :
VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
- "vsoxseg"#nf#"ei64.v">;
+ "vsoxseg"#nf#"ei64.v">, VSOXSEGSched<nf, 64>;
}
} // Predicates = [HasVInstructionsI64, IsRV64]
diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td
index 43af1802d706f..bafcf47b82e43 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleV.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td
@@ -53,6 +53,20 @@ def WriteVLDFF8 : SchedWrite;
def WriteVLDFF16 : SchedWrite;
def WriteVLDFF32 : SchedWrite;
def WriteVLDFF64 : SchedWrite;
+// 7.8. Vector Segment Instructions
+foreach nf=2-8 in {
+ foreach eew = [8, 16, 32, 64] in {
+ def WriteVLSEG # nf # e # eew : SchedWrite;
+ def WriteVSSEG # nf # e # eew : SchedWrite;
+ def WriteVLSEGFF # nf # e # eew : SchedWrite;
+ def WriteVLSSEG # nf # e # eew : SchedWrite;
+ def WriteVSSSEG # nf # e # eew : SchedWrite;
+ def WriteVLUXSEG # nf # e # eew : SchedWrite;
+ def WriteVLOXSEG # nf # e # eew : SchedWrite;
+ def WriteVSUXSEG # nf # e # eew : SchedWrite;
+ def WriteVSOXSEG # nf # e # eew : SchedWrite;
+ }
+}
// 7.9. Vector Whole Register Instructions
def WriteVLD1R8 : SchedWrite;
def WriteVLD1R16 : SchedWrite;
@@ -538,6 +552,20 @@ def : WriteRes<WriteVST1R, []>;
def : WriteRes<WriteVST2R, []>;
def : WriteRes<WriteVST4R, []>;
def : WriteRes<WriteVST8R, []>;
+// Vector Segment Loads and Stores
+foreach nf=2-8 in {
+ foreach eew = [8, 16, 32, 64] in {
+ def : WriteRes <!cast<SchedWrite>("WriteVLSEG" # nf # "e" # eew), []>;
+ def : WriteRes <!cast<SchedWrite>("WriteVLSEGFF" # nf # "e" # eew), []>;
+ def : WriteRes <!cast<SchedWrite>("WriteVSSEG" # nf # "e" # eew), []>;
+ def : WriteRes <!cast<SchedWrite>("WriteVLSSEG" # nf # "e" # eew), []>;
+ def : WriteRes <!cast<SchedWrite>("WriteVSSSEG" # nf # "e" # eew), []>;
+ def : WriteRes <!cast<SchedWrite>("WriteVLUXSEG" # nf # "e" # eew), []>;
+ def : WriteRes <!cast<SchedWrite>("WriteVLOXSEG" # nf # "e" # eew), []>;
+ def : WriteRes <!cast<SchedWrite>("WriteVSUXSEG" # nf # "e" # eew), []>;
+ def : WriteRes <!cast<SchedWrite>("WriteVSOXSEG" # nf # "e" # eew), []>;
+ }
+}
// 12. Vector Integer Arithmetic Instructions
def : WriteRes<WriteVIALUV, []>;
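
Follow-up note: the WriteRes entries with empty resource lists added above only provide defaults so that schedules which do not model these instructions still build; a processor-specific scheduling model is expected to override them with real latencies and resources. A minimal sketch of such an override, assuming a hypothetical model MyVProcModel with a single vector load/store pipe MyVProcVLSU (both names are invented for illustration and do not exist in tree, and the costs are assumptions, not measured values):

// Hypothetical processor model, for illustration only.
def MyVProcModel : SchedMachineModel {
  let MicroOpBufferSize = 0;   // in-order
  let CompleteModel = 0;       // this sketch only covers a few writes
}

let SchedModel = MyVProcModel in {
  def MyVProcVLSU : ProcResource<1>;   // single vector load/store pipe

  foreach nf = 2-8 in {
    foreach eew = [8, 16, 32, 64] in {
      // Assumed cost: latency and pipe occupancy scale with the field count.
      let Latency = !mul(nf, 4), ResourceCycles = [nf] in {
        def : WriteRes<!cast<SchedWrite>("WriteVLSEG" # nf # "e" # eew),
                       [MyVProcVLSU]>;
        def : WriteRes<!cast<SchedWrite>("WriteVSSEG" # nf # "e" # eew),
                       [MyVProcVLSU]>;
      }
    }
  }
}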