[llvm] [RISCV] Move vector load/store segment instructions upwards into 'Vector Loads and Stores'. NFC (PR #77297)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 8 03:28:36 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Ying Chen (punkyc)
This is based on the standard 'V' Vector extension, version 1.0. Currently, the definitions of the vector load/store segment instructions from chapter 7.8 are placed at the very end of the file, which is odd.
So, this patch just moves the code. Hopefully this is OK.
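For readers unfamiliar with the idiom in the moved block: the nested `foreach` loops use TableGen's `#` paste operator to stamp out one record per (nf, eew) pair. Below is a minimal standalone sketch of that mechanism (not from the patch; the `Inst` class is a hypothetical stand-in for `VUnitStrideSegmentLoad` and friends):

```tablegen
// Minimal sketch, runnable with `llvm-tblgen example.td`.
// Inst is hypothetical; the real definitions in RISCVInstrInfoV.td
// use classes like VUnitStrideSegmentLoad with scheduling info.
class Inst<string asmname> {
  string AsmName = asmname;
}

foreach nf = 2-8 in {
  foreach eew = [8, 16, 32] in {
    // Expands to defs VLSEG2E8_V .. VLSEG8E32_V, 21 records in all.
    def VLSEG#nf#E#eew#_V : Inst<"vlseg"#nf#"e"#eew#".v">;
  }
}
```

Running `llvm-tblgen` on such a file prints the expanded records, which is also a handy way to confirm that a pure code move like this one leaves the generated definitions unchanged.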
---
Full diff: https://github.com/llvm/llvm-project/pull/77297.diff
1 File Affected:
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoV.td (+89-87)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 9fc9a29c210df2..d6c32dfc328bf4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -1097,6 +1097,95 @@ def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
} // Predicates = [HasVInstructions]
+let Predicates = [HasVInstructions] in {
+ foreach nf=2-8 in {
+ foreach eew = [8, 16, 32] in {
+ defvar w = !cast<RISCVWidth>("LSWidth"#eew);
+
+ // Vector Unit-strided Segment Instructions
+ def VLSEG#nf#E#eew#_V :
+ VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
+ VLSEGSchedMC<nf, eew>;
+ def VLSEG#nf#E#eew#FF_V :
+ VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
+ VLSEGFFSchedMC<nf, eew>;
+ def VSSEG#nf#E#eew#_V :
+ VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
+ VSSEGSchedMC<nf, eew>;
+
+ // Vector Strided Segment Instructions
+ def VLSSEG#nf#E#eew#_V :
+ VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
+ VLSSEGSchedMC<nf, eew>;
+ def VSSSEG#nf#E#eew#_V :
+ VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
+ VSSSEGSchedMC<nf, eew>;
+
+ // Vector Indexed Segment Instructions
+ def VLUXSEG#nf#EI#eew#_V :
+ VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
+ "vluxseg"#nf#"ei"#eew#".v">,
+ VLXSEGSchedMC<nf, eew, isOrdered=0>;
+ def VLOXSEG#nf#EI#eew#_V :
+ VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
+ "vloxseg"#nf#"ei"#eew#".v">,
+ VLXSEGSchedMC<nf, eew, isOrdered=1>;
+ def VSUXSEG#nf#EI#eew#_V :
+ VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
+ "vsuxseg"#nf#"ei"#eew#".v">,
+ VSXSEGSchedMC<nf, eew, isOrdered=0>;
+ def VSOXSEG#nf#EI#eew#_V :
+ VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
+ "vsoxseg"#nf#"ei"#eew#".v">,
+ VSXSEGSchedMC<nf, eew, isOrdered=1>;
+ }
+ }
+} // Predicates = [HasVInstructions]
+
+let Predicates = [HasVInstructionsI64] in {
+ foreach nf=2-8 in {
+ // Vector Unit-strided Segment Instructions
+ def VLSEG#nf#E64_V :
+ VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
+ VLSEGSchedMC<nf, 64>;
+ def VLSEG#nf#E64FF_V :
+ VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
+ VLSEGFFSchedMC<nf, 64>;
+ def VSSEG#nf#E64_V :
+ VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
+ VSSEGSchedMC<nf, 64>;
+
+ // Vector Strided Segment Instructions
+ def VLSSEG#nf#E64_V :
+ VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
+ VLSSEGSchedMC<nf, 64>;
+ def VSSSEG#nf#E64_V :
+ VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
+ VSSSEGSchedMC<nf, 64>;
+ }
+} // Predicates = [HasVInstructionsI64]
+let Predicates = [HasVInstructionsI64, IsRV64] in {
+ foreach nf = 2 - 8 in {
+ // Vector Indexed Segment Instructions
+ def VLUXSEG #nf #EI64_V
+ : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
+ "vluxseg" #nf #"ei64.v">,
+ VLXSEGSchedMC<nf, 64, isOrdered=0>;
+ def VLOXSEG #nf #EI64_V
+ : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
+ "vloxseg" #nf #"ei64.v">,
+ VLXSEGSchedMC<nf, 64, isOrdered=1>;
+ def VSUXSEG #nf #EI64_V
+ : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
+ "vsuxseg" #nf #"ei64.v">,
+ VSXSEGSchedMC<nf, 64, isOrdered=0>;
+ def VSOXSEG #nf #EI64_V
+ : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
+ "vsoxseg" #nf #"ei64.v">,
+ VSXSEGSchedMC<nf, 64, isOrdered=1>;
+ }
+} // Predicates = [HasVInstructionsI64, IsRV64]
+
let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
@@ -1711,92 +1800,5 @@ foreach n = [1, 2, 4, 8] in {
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasVInstructions]
-let Predicates = [HasVInstructions] in {
- foreach nf=2-8 in {
- foreach eew = [8, 16, 32] in {
- defvar w = !cast<RISCVWidth>("LSWidth"#eew);
-
- def VLSEG#nf#E#eew#_V :
- VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
- VLSEGSchedMC<nf, eew>;
- def VLSEG#nf#E#eew#FF_V :
- VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
- VLSEGFFSchedMC<nf, eew>;
- def VSSEG#nf#E#eew#_V :
- VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
- VSSEGSchedMC<nf, eew>;
- // Vector Strided Instructions
- def VLSSEG#nf#E#eew#_V :
- VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
- VLSSEGSchedMC<nf, eew>;
- def VSSSEG#nf#E#eew#_V :
- VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
- VSSSEGSchedMC<nf, eew>;
-
- // Vector Indexed Instructions
- def VLUXSEG#nf#EI#eew#_V :
- VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
- "vluxseg"#nf#"ei"#eew#".v">,
- VLXSEGSchedMC<nf, eew, isOrdered=0>;
- def VLOXSEG#nf#EI#eew#_V :
- VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
- "vloxseg"#nf#"ei"#eew#".v">,
- VLXSEGSchedMC<nf, eew, isOrdered=1>;
- def VSUXSEG#nf#EI#eew#_V :
- VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
- "vsuxseg"#nf#"ei"#eew#".v">,
- VSXSEGSchedMC<nf, eew, isOrdered=0>;
- def VSOXSEG#nf#EI#eew#_V :
- VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
- "vsoxseg"#nf#"ei"#eew#".v">,
- VSXSEGSchedMC<nf, eew, isOrdered=1>;
- }
- }
-} // Predicates = [HasVInstructions]
-
-let Predicates = [HasVInstructionsI64] in {
- foreach nf=2-8 in {
- // Vector Unit-strided Segment Instructions
- def VLSEG#nf#E64_V :
- VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
- VLSEGSchedMC<nf, 64>;
- def VLSEG#nf#E64FF_V :
- VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
- VLSEGFFSchedMC<nf, 64>;
- def VSSEG#nf#E64_V :
- VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
- VSSEGSchedMC<nf, 64>;
-
- // Vector Strided Segment Instructions
- def VLSSEG#nf#E64_V :
- VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
- VLSSEGSchedMC<nf, 64>;
- def VSSSEG#nf#E64_V :
- VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
- VSSSEGSchedMC<nf, 64>;
- }
-} // Predicates = [HasVInstructionsI64]
-let Predicates = [HasVInstructionsI64, IsRV64] in {
- foreach nf = 2 - 8 in {
- // Vector Indexed Segment Instructions
- def VLUXSEG #nf #EI64_V
- : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
- "vluxseg" #nf #"ei64.v">,
- VLXSEGSchedMC<nf, 64, isOrdered=0>;
- def VLOXSEG #nf #EI64_V
- : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
- "vloxseg" #nf #"ei64.v">,
- VLXSEGSchedMC<nf, 64, isOrdered=1>;
- def VSUXSEG #nf #EI64_V
- : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
- "vsuxseg" #nf #"ei64.v">,
- VSXSEGSchedMC<nf, 64, isOrdered=0>;
- def VSOXSEG #nf #EI64_V
- : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
- "vsoxseg" #nf #"ei64.v">,
- VSXSEGSchedMC<nf, 64, isOrdered=1>;
- }
-} // Predicates = [HasVInstructionsI64, IsRV64]
-
include "RISCVInstrInfoZvfbf.td"
include "RISCVInstrInfoVPseudos.td"
``````````
https://github.com/llvm/llvm-project/pull/77297