[llvm-branch-commits] [llvm] 15ce0ab - [RISCV] Refine vector load/store tablegen pattern, NFC.

Zakk Chen via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Dec 15 19:02:13 PST 2020


Author: Zakk Chen
Date: 2020-12-15T18:55:55-08:00
New Revision: 15ce0ab7ac46382ec38e7de59ec40c099b85cbf7

URL: https://github.com/llvm/llvm-project/commit/15ce0ab7ac46382ec38e7de59ec40c099b85cbf7
DIFF: https://github.com/llvm/llvm-project/commit/15ce0ab7ac46382ec38e7de59ec40c099b85cbf7.diff

LOG: [RISCV] Refine vector load/store tablegen pattern, NFC.

Refine the tablegen patterns for vector loads and stores, and follow
D93012 by separating the masked and unmasked definitions of the
pseudo load/store instructions.
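
For example, with EEW=64 and LMUL=1, each defm below expands to one
unmasked and one masked pseudo (a minimal sketch of the expansion; the
LMUL=1 register class is VR, and larger LMULs use VRM2/VRM4/VRM8):

  defm PseudoVLE64 : VPseudoUSLoad;
  // expands to:
  //   def PseudoVLE64_V_M1      : VPseudoUSLoadNoMask<VR>; // unmasked
  //   def PseudoVLE64_V_M1_MASK : VPseudoUSLoadMask<VR>;   // masked, ties $rd to $merge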

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D93284

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
    llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 81c47abab595..25fd7435affd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -277,6 +277,68 @@ class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
   let VLMul = m.value;
 }
 
+class VPseudoUSLoadNoMask<VReg RetClass>:
+      Pseudo<(outs RetClass:$rd),
+             (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSLoadMask<VReg RetClass>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+              (ins GetVRegNoV0<RetClass>.R:$merge,
+                   GPR:$rs1,
+                   VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSStoreNoMask<VReg StClass>:
+      Pseudo<(outs),
+              (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSStoreMask<VReg StClass>:
+      Pseudo<(outs),
+              (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoBinaryNoMask<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
@@ -342,6 +404,28 @@ class VPseudoBinaryCarryIn<VReg RetClass,
   let VLMul = MInfo.value;
 }
 
+multiclass VPseudoUSLoad {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoUSLoadNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoUSLoadMask<vreg>;
+    }
+  }
+}
+
+multiclass VPseudoUSStore {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoUSStoreNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg>;
+    }
+  }
+}
+
 multiclass VPseudoBinary<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
@@ -519,8 +603,32 @@ multiclass VPseudoBinaryV_WV_WX_WI {
 }
 
 //===----------------------------------------------------------------------===//
-// Helpers to define the different patterns.
+// Helpers to define the SDNode patterns.
 //===----------------------------------------------------------------------===//
+
+multiclass VPatUSLoadStoreSDNode<LLVMType type,
+                                 LLVMType mask_type,
+                                 int sew,
+                                 LMULInfo vlmul,
+                                 RegisterClass reg_rs1,
+                                 VReg reg_class>
+{
+  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
+  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
+  // Load
+  def : Pat<(type (load reg_rs1:$rs1)),
+            (load_instr reg_rs1:$rs1, VLMax, sew)>;
+  // Store
+  def : Pat<(store type:$rs2, reg_rs1:$rs1),
+            (store_instr reg_class:$rs2, reg_rs1:$rs1, VLMax, sew)>;
+}
+
+multiclass VPatUSLoadStoreSDNodes<RegisterClass reg_rs1> {
+  foreach vti = AllVectors in
+    defm "" : VPatUSLoadStoreSDNode<vti.Vector, vti.Mask, vti.SEW, vti.LMul,
+                                    reg_rs1, vti.RegClass>;
+}
+
 class VPatBinarySDNode<SDNode vop,
                        string instruction_name,
                        ValueType result_type,
@@ -546,6 +654,9 @@ multiclass VPatBinarySDNode<SDNode vop, string instruction_name>
                            vti.LMul, vti.RegClass, vti.RegClass>;
 }
 
+//===----------------------------------------------------------------------===//
+// Helpers to define the intrinsic patterns.
+//===----------------------------------------------------------------------===//
 class VPatBinaryNoMask<string intrinsic_name,
                        string inst,
                        string kind,
@@ -922,83 +1033,10 @@ def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), []>
 // 7. Vector Loads and Stores
 //===----------------------------------------------------------------------===//
 
-// Pseudos.
+// Pseudos for Unit-Stride Loads and Stores
 foreach eew = EEWList in {
-  foreach lmul = MxList.m in {
-    defvar LInfo = lmul.MX;
-    defvar vreg = lmul.vrclass;
-    defvar vlmul = lmul.value;
-    defvar constraint = "$rd = $merge";
-
-    let mayLoad = 1, mayStore = 0, hasSideEffects = 0,
-        usesCustomInserter = 1,
-        VLMul = vlmul in
-    {
-      let Uses = [VL, VTYPE], VLIndex = 4, SEWIndex = 5, MergeOpIndex = 1,
-          Constraints = constraint,
-          BaseInstr = !cast<Instruction>("VLE" # eew # "_V") in
-          def "PseudoVLE" # eew # "_V_" # LInfo
-            : Pseudo<(outs vreg:$rd),
-                     (ins vreg:$merge, GPR:$rs1, VMaskOp:$mask, GPR:$vl,
-                      ixlenimm:$sew),
-                     []>,
-              RISCVVPseudo;
-    }
-
-    let mayLoad = 0, mayStore = 1, hasSideEffects = 0,
-        usesCustomInserter = 1,
-        VLMul = vlmul in
-    {
-      // Masked stores do not have a merge operand as merge is done in memory
-      let Uses = [VL, VTYPE],
-          VLIndex = 3, SEWIndex = 4, MergeOpIndex = -1,
-          BaseInstr = !cast<Instruction>("VSE" # eew # "_V") in
-        def "PseudoVSE" # eew # "_V_" # LInfo
-            : Pseudo<(outs),
-                     (ins vreg:$rd, GPR:$rs1, VMaskOp:$mask, GPR:$vl,
-                          ixlenimm:$sew),
-                     []>,
-              RISCVVPseudo;
-    }
-  }
-}
-
-// Patterns.
-multiclass pat_load_store<LLVMType type,
-                          LLVMType mask_type,
-                          int sew,
-                          LMULInfo vlmul,
-                          VReg reg_class>
-{
-  defvar load_instr = !cast<Instruction>("PseudoVLE" # sew # "_V_"# vlmul.MX);
-  defvar store_instr = !cast<Instruction>("PseudoVSE" # sew # "_V_"# vlmul.MX);
-  // Load
-  def : Pat<(type (load GPR:$rs1)),
-            (load_instr (type (IMPLICIT_DEF)),
-             GPR:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-  def : Pat<(type (load AddrFI:$rs1)),
-             (load_instr (type (IMPLICIT_DEF)),
-             AddrFI:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-
-  // Store
-  def : Pat<(store type:$rs2, GPR:$rs1),
-            (store_instr reg_class:$rs2, GPR:$rs1,
-             (mask_type zero_reg),
-              VLMax, sew)>;
-  def : Pat<(store type:$rs2, AddrFI:$rs1),
-            (store_instr reg_class:$rs2, AddrFI:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-}
-
-foreach vti = AllVectors in
-{
-  defm : pat_load_store<vti.Vector, vti.Mask,
-                        vti.SEW, vti.LMul, vti.RegClass>;
+  defm PseudoVLE # eew : VPseudoUSLoad;
+  defm PseudoVSE # eew : VPseudoUSStore;
 }
 
 //===----------------------------------------------------------------------===//
@@ -1078,6 +1116,12 @@ defm PseudoVFRSUB      : VPseudoBinaryV_VX</*IsFloat=*/1>;
 let Predicates = [HasStdExtV] in {
 
 // Whole-register vector patterns.
+
+// 7.4. Vector Unit-Stride Instructions
+defm "" : VPatUSLoadStoreSDNodes<GPR>;
+defm "" : VPatUSLoadStoreSDNodes<AddrFI>;
+
+// 12.1. Vector Single-Width Integer Add and Subtract
 defm "" : VPatBinarySDNode<add, "PseudoVADD">;
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
index 049de4e1bd23..f323bf1b3161 100644
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
@@ -27,27 +27,26 @@ body:             |
     %2:gpr = COPY $x12
     %1:gpr = COPY $x11
     %0:gpr = COPY $x10
-    %5:vr = IMPLICIT_DEF
-    %4:vr = PseudoVLE64_V_M1 %5, %1, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-    %7:vr = IMPLICIT_DEF
-    %6:vr = PseudoVLE64_V_M1 %7, %2, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-    %8:vr = PseudoVADD_VV_M1 killed %4, killed %6, %3, 64, implicit $vl, implicit $vtype
-    PseudoVSE64_V_M1 killed %8, %0, $noreg, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+    %4:vr = PseudoVLE64_V_M1 %1, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+    %5:vr = PseudoVLE64_V_M1 %2, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+    %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, %3, 64, implicit $vl, implicit $vtype
+    PseudoVSE64_V_M1 killed %6, %0, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
     PseudoRET
 
 ...
 
 # POST-INSERTER: %0:gpr = COPY $x13
-# POST-INSERTER: %4:vr = IMPLICIT_DEF
+# POST-INSERTER: %1:gpr = COPY $x12
+# POST-INSERTER: %2:gpr = COPY $x11
+# POST-INSERTER: %3:gpr = COPY $x10
+# POST-INSERTER: dead %7:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+# POST-INSERTER: dead %8:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 # POST-INSERTER: dead %9:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %4, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-# POST-INSERTER: %6:vr = IMPLICIT_DEF
+# POST-INSERTER: %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, $noreg, -1, implicit $vl, implicit $vtype
 # POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %7:vr = PseudoVLE64_V_M1 %6, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %8:vr = PseudoVADD_VV_M1 killed %5, killed %7, $noreg, -1, implicit $vl, implicit $vtype
-# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: PseudoVSE64_V_M1 killed %8, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+# POST-INSERTER: PseudoVSE64_V_M1 killed %6, %3, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
 # CODEGEN: vsetvli	a3, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v	v25, (a1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
index b8093f5008e3..53f316f61e92 100644
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
@@ -20,20 +20,16 @@ define void @vadd_vint64m1(
   ret void
 }
 
-; PRE-INSERTER: %4:vr = IMPLICIT_DEF
-; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; PRE-INSERTER: %6:vr = IMPLICIT_DEF
-; PRE-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-; PRE-INSERTER: %7:vr = PseudoVADD_VV_M1 killed %3, killed %5, $x0, 64, implicit $vl, implicit $vtype
-; PRE-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+; PRE-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 64, implicit $vl, implicit $vtype
+; PRE-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
-; POST-INSERTER: %4:vr = IMPLICIT_DEF
+; POST-INSERTER: dead %6:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+; POST-INSERTER: dead %7:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 ; POST-INSERTER: dead %8:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; POST-INSERTER: %6:vr = IMPLICIT_DEF
+; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, -1, implicit $vl, implicit $vtype
 ; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %7:vr = PseudoVADD_VV_M1 killed %3, killed %5, $noreg, -1, implicit $vl, implicit $vtype
-; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+; POST-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
