[llvm] 0af4651 - [RISCV] Add scheduling class for vector pseudo segment instructions.
Monk Chiang via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 16 17:54:54 PDT 2022
Author: Monk Chiang
Date: 2022-08-16T17:54:47-07:00
New Revision: 0af4651c0fc7d8887c7086381f6f514132899b20
URL: https://github.com/llvm/llvm-project/commit/0af4651c0fc7d8887c7086381f6f514132899b20
DIFF: https://github.com/llvm/llvm-project/commit/0af4651c0fc7d8887c7086381f6f514132899b20.diff
LOG: [RISCV] Add scheduling class for vector pseudo segment instructions.
Scheduling resources for vector segment load/store instructions were added in D128886,
but I missed adding scheduling resources for the pseudo segment instructions.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D130222
Added:
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfoV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 1ad634344c09..7a2701e1e670 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -131,18 +131,13 @@ class VSSSEGSched<int nf, int eew> : Sched<[
!cast<SchedReadWrite>("WriteVSSSEG" #nf #"e" #eew),
!cast<SchedReadWrite>("ReadVSTS" #eew #"V"), ReadVSTX, ReadVSTSX, ReadVMask]>;
// Indexed Segment Loads and Stores
-class VLUXSEGSched<int nf, int eew> : Sched<[
- !cast<SchedReadWrite>("WriteVLUXSEG" #nf #"e" #eew), ReadVLDX, ReadVLDUXV,
- ReadVMask]>;
-class VLOXSEGSched<int nf, int eew> : Sched<[
- !cast<SchedReadWrite>("WriteVLOXSEG" #nf #"e" #eew), ReadVLDX, ReadVLDOXV,
- ReadVMask]>;
-class VSUXSEGSched<int nf, int eew> : Sched<[
- !cast<SchedReadWrite>("WriteVSUXSEG" #nf #"e" #eew),
- !cast<SchedReadWrite>("ReadVSTUX" #eew), ReadVSTX, ReadVSTUXV, ReadVMask]>;
-class VSOXSEGSched<int nf, int eew> : Sched<[
- !cast<SchedReadWrite>("WriteVSOXSEG" #nf #"e" #eew),
- !cast<SchedReadWrite>("ReadVSTOX" #eew), ReadVSTX, ReadVSTOXV, ReadVMask]>;
+class VLXSEGSched<int nf, int eew, string o> : Sched<[
+ !cast<SchedReadWrite>("WriteVL" #o # "XSEG" #nf #"e" #eew), ReadVLDX,
+ !cast<SchedReadWrite>("ReadVLD" # o # "XV"), ReadVMask]>;
+class VSXSEGSched<int nf, int eew, string o> : Sched<[
+ !cast<SchedReadWrite>("WriteVS" #o # "XSEG" #nf #"e" #eew),
+ !cast<SchedReadWrite>("ReadVST" #o # "X" #eew), ReadVSTX,
+ !cast<SchedReadWrite>("ReadVST" #o # "XV"), ReadVMask]>;
//===----------------------------------------------------------------------===//
// Instruction class templates
@@ -1543,16 +1538,20 @@ let Predicates = [HasVInstructions] in {
// Vector Indexed Instructions
def VLUXSEG#nf#EI#eew#_V :
VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
- "vluxseg"#nf#"ei"#eew#".v">, VLUXSEGSched<nf, eew>;
+ "vluxseg"#nf#"ei"#eew#".v">,
+ VLXSEGSched<nf, eew, "U">;
def VLOXSEG#nf#EI#eew#_V :
VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
- "vloxseg"#nf#"ei"#eew#".v">, VLOXSEGSched<nf, eew>;
+ "vloxseg"#nf#"ei"#eew#".v">,
+ VLXSEGSched<nf, eew, "O">;
def VSUXSEG#nf#EI#eew#_V :
VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
- "vsuxseg"#nf#"ei"#eew#".v">, VSUXSEGSched<nf, eew>;
+ "vsuxseg"#nf#"ei"#eew#".v">,
+ VSXSEGSched<nf, eew, "U">;
def VSOXSEG#nf#EI#eew#_V :
VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
- "vsoxseg"#nf#"ei"#eew#".v">, VSOXSEGSched<nf, eew>;
+ "vsoxseg"#nf#"ei"#eew#".v">,
+ VSXSEGSched<nf, eew, "O">;
}
}
} // Predicates = [HasVInstructions]
@@ -1584,16 +1583,16 @@ let Predicates = [HasVInstructionsI64, IsRV64] in {
// Vector Indexed Segment Instructions
def VLUXSEG#nf#EI64_V :
VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
- "vluxseg"#nf#"ei64.v">, VLUXSEGSched<nf, 64>;
+ "vluxseg"#nf#"ei64.v">, VLXSEGSched<nf, 64, "U">;
def VLOXSEG#nf#EI64_V :
VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
- "vloxseg"#nf#"ei64.v">, VLOXSEGSched<nf, 64>;
+ "vloxseg"#nf#"ei64.v">, VLXSEGSched<nf, 64, "O">;
def VSUXSEG#nf#EI64_V :
VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
- "vsuxseg"#nf#"ei64.v">, VSUXSEGSched<nf, 64>;
+ "vsuxseg"#nf#"ei64.v">, VSXSEGSched<nf, 64, "U">;
def VSOXSEG#nf#EI64_V :
VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
- "vsoxseg"#nf#"ei64.v">, VSOXSEGSched<nf, 64>;
+ "vsoxseg"#nf#"ei64.v">, VSXSEGSched<nf, 64, "O">;
}
} // Predicates = [HasVInstructionsI64, IsRV64]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 03e77c78692a..efa5a7321800 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2837,11 +2837,11 @@ multiclass VPseudoUSSegLoad {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
def nf # "E" # eew # "_V_" # LInfo :
- VPseudoUSSegLoadNoMask<vreg, eew, nf>;
+ VPseudoUSSegLoadNoMask<vreg, eew, nf>, VLSEGSched<nf, eew>;
def nf # "E" # eew # "_V_" # LInfo # "_TU" :
- VPseudoUSSegLoadNoMaskTU<vreg, eew, nf>;
+ VPseudoUSSegLoadNoMaskTU<vreg, eew, nf>, VLSEGSched<nf, eew>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
- VPseudoUSSegLoadMask<vreg, eew, nf>;
+ VPseudoUSSegLoadMask<vreg, eew, nf>, VLSEGSched<nf, eew>;
}
}
}
@@ -2856,11 +2856,11 @@ multiclass VPseudoUSSegLoadFF {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
def nf # "E" # eew # "FF_V_" # LInfo :
- VPseudoUSSegLoadFFNoMask<vreg, eew, nf>;
+ VPseudoUSSegLoadFFNoMask<vreg, eew, nf>, VLSEGFFSched<nf, eew>;
def nf # "E" # eew # "FF_V_" # LInfo # "_TU" :
- VPseudoUSSegLoadFFNoMaskTU<vreg, eew, nf>;
+ VPseudoUSSegLoadFFNoMaskTU<vreg, eew, nf>, VLSEGFFSched<nf, eew>;
def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
- VPseudoUSSegLoadFFMask<vreg, eew, nf>;
+ VPseudoUSSegLoadFFMask<vreg, eew, nf>, VLSEGFFSched<nf, eew>;
}
}
}
@@ -2874,9 +2874,12 @@ multiclass VPseudoSSegLoad {
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
- def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>;
- def nf # "E" # eew # "_V_" # LInfo # "_TU" : VPseudoSSegLoadNoMaskTU<vreg, eew, nf>;
- def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>;
+ def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>,
+ VLSSEGSched<nf, eew>;
+ def nf # "E" # eew # "_V_" # LInfo # "_TU" : VPseudoSSegLoadNoMaskTU<vreg, eew, nf>,
+ VLSSEGSched<nf, eew>;
+ def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>,
+ VLSSEGSched<nf, eew>;
}
}
}
@@ -2896,18 +2899,22 @@ multiclass VPseudoISegLoad<bit Ordered> {
defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
defvar Vreg = val_lmul.vrclass;
defvar IdxVreg = idx_lmul.vrclass;
+ defvar Order = !if(Ordered, "O", "U");
let VLMul = val_lmul.value in {
foreach nf = NFSet<val_lmul>.L in {
defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
- nf, Ordered>;
+ nf, Ordered>,
+ VLXSEGSched<nf, idx_eew, Order>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_TU" :
VPseudoISegLoadNoMaskTU<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
- nf, Ordered>;
+ nf, Ordered>,
+ VLXSEGSched<nf, idx_eew, Order>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
- nf, Ordered>;
+ nf, Ordered>,
+ VLXSEGSched<nf, idx_eew, Order>;
}
}
}
@@ -2923,8 +2930,10 @@ multiclass VPseudoUSSegStore {
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
- def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>;
- def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>;
+ def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>,
+ VSSEGSched<nf, eew>;
+ def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>,
+ VSSEGSched<nf, eew>;
}
}
}
@@ -2938,8 +2947,10 @@ multiclass VPseudoSSegStore {
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
- def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>;
- def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>;
+ def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>,
+ VSSSEGSched<nf, eew>;
+ def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>,
+ VSSSEGSched<nf, eew>;
}
}
}
@@ -2959,15 +2970,18 @@ multiclass VPseudoISegStore<bit Ordered> {
defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
defvar Vreg = val_lmul.vrclass;
defvar IdxVreg = idx_lmul.vrclass;
+ defvar Order = !if(Ordered, "O", "U");
let VLMul = val_lmul.value in {
foreach nf = NFSet<val_lmul>.L in {
defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
- nf, Ordered>;
+ nf, Ordered>,
+ VSXSEGSched<nf, idx_eew, Order>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
- nf, Ordered>;
+ nf, Ordered>,
+ VSXSEGSched<nf, idx_eew, Order>;
}
}
}
More information about the llvm-commits
mailing list