[llvm] 5cf73dc - [RISCV] Convert most of the information about RVV Pseudos into bits in TSFlags.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sun Jan 10 19:18:24 PST 2021


Author: Craig Topper
Date: 2021-01-10T19:15:45-08:00
New Revision: 5cf73dca77e52f54c893d2c5fc2f56a5f2764f7d

URL: https://github.com/llvm/llvm-project/commit/5cf73dca77e52f54c893d2c5fc2f56a5f2764f7d
DIFF: https://github.com/llvm/llvm-project/commit/5cf73dca77e52f54c893d2c5fc2f56a5f2764f7d.diff

LOG: [RISCV] Convert most of the information about RVV Pseudos into bits in TSFlags.

This patch moves all of the information except the BaseInstr into bits in TSFlags.

For the index fields, a single bit indicating that the operand is present is
enough, since the operand locations are well defined.

This reduces the llc binary by about 32K on my build. It also
removes the binary search of the table from the custom inserter.
Instead, we just check whether the SEW operand is present.
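
Condensed, the new query in the custom inserter is just a handful of bit tests
against TSFlags. A sketch of that pattern (the wrapper name "emitRVVSetVL" is
illustrative, not a function added by this patch; the body mirrors the
RISCVISelLowering.cpp hunk below):

    static MachineBasicBlock *emitRVVSetVL(MachineInstr &MI,
                                           MachineBasicBlock *BB) {
      uint64_t TSFlags = MI.getDesc().TSFlags;
      if (!(TSFlags & RISCVII::HasSEWOpMask))
        return nullptr; // Not an RVV pseudo carrying a SEW operand.

      unsigned NumOperands = MI.getNumExplicitOperands();
      unsigned SEWIndex = NumOperands - 1;             // SEW is always last.
      int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
      bool WritesElement0 = TSFlags & RISCVII::WritesElement0Mask;
      RISCVVLMUL VLMul = static_cast<RISCVVLMUL>(
          (TSFlags & RISCVII::VLMulMask) >> RISCVII::VLMulShift);
      return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, WritesElement0);
    }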

Reviewed By: rogfer01

Differential Revision: https://reviews.llvm.org/D94375

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrFormats.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
    llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 22d15bc8586b..2349b43f30c6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2122,7 +2122,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
 
 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
                                     int VLIndex, unsigned SEWIndex,
-                                    unsigned VLMul, bool WritesElement0) {
+                                    RISCVVLMUL VLMul, bool WritesElement0) {
   MachineFunction &MF = *BB->getParent();
   DebugLoc DL = MI.getDebugLoc();
   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
@@ -2131,9 +2131,6 @@ static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
   RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
 
-  // LMUL should already be encoded correctly.
-  RISCVVLMUL Multiplier = static_cast<RISCVVLMUL>(VLMul);
-
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
   // VL and VTYPE are alive here.
@@ -2160,7 +2157,7 @@ static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
     TailAgnostic = false;
 
   // For simplicity we reuse the vtype representation here.
-  MIB.addImm(RISCVVType::encodeVTYPE(Multiplier, ElementWidth,
+  MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
                                      /*TailAgnostic*/ TailAgnostic,
                                      /*MaskAgnostic*/ false));
 
@@ -2177,15 +2174,17 @@ static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
 MachineBasicBlock *
 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
+  uint64_t TSFlags = MI.getDesc().TSFlags;
 
-  if (const RISCVVPseudosTable::PseudoInfo *RVV =
-          RISCVVPseudosTable::getPseudoInfo(MI.getOpcode())) {
-    int VLIndex = RVV->getVLIndex();
-    int SEWIndex = RVV->getSEWIndex();
-    bool WritesElement0 = RVV->writesElement0();
+  if (TSFlags & RISCVII::HasSEWOpMask) {
+    unsigned NumOperands = MI.getNumExplicitOperands();
+    int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
+    unsigned SEWIndex = NumOperands - 1;
+    bool WritesElement0 = TSFlags & RISCVII::WritesElement0Mask;
 
-    assert(SEWIndex >= 0 && "SEWIndex must be >= 0");
-    return addVSetVL(MI, BB, VLIndex, SEWIndex, RVV->VLMul, WritesElement0);
+    RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
+                                               RISCVII::VLMulShift);
+    return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, WritesElement0);
   }
 
   switch (MI.getOpcode()) {

diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index 5c8d8fa65b30..ea867c549e64 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -167,6 +167,24 @@ class RVInst<dag outs, dag ins, string opcodestr, string argstr,
   // Defaults
   RISCVVConstraint RVVConstraint = NoConstraint;
   let TSFlags{7-5} = RVVConstraint.Value;
+
+  bits<3> VLMul = 0;
+  let TSFlags{10-8} = VLMul;
+
+  bit HasDummyMask = 0;
+  let TSFlags{11} = HasDummyMask;
+
+  bit WritesElement0 = 0;
+  let TSFlags{12} = WritesElement0;
+
+  bit HasMergeOp = 0;
+  let TSFlags{13} = HasMergeOp;
+
+  bit HasSEWOp = 0;
+  let TSFlags{14} = HasSEWOp;
+
+  bit HasVLOp = 0;
+  let TSFlags{15} = HasVLOp;
 }
 
 // Pseudo instructions
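
The bit positions assigned above line up with the shift constants added to
RISCVBaseInfo.h later in this diff. A sketch of the kind of sanity check that
could keep the two in sync (these asserts are illustrative, not part of the
patch):

    // VLMul occupies TSFlags{10-8}; the single-bit flags occupy bits 11-15.
    static_assert(RISCVII::VLMulShift == 8, "VLMul expected at TSFlags{10-8}");
    static_assert(RISCVII::HasDummyMaskOpShift == 11,
                  "HasDummyMask expected at TSFlags{11}");
    static_assert(RISCVII::WritesElement0Shift == 12,
                  "WritesElement0 expected at TSFlags{12}");
    static_assert(RISCVII::HasMergeOpShift == 13,
                  "HasMergeOp expected at TSFlags{13}");
    static_assert(RISCVII::HasSEWOpShift == 14,
                  "HasSEWOp expected at TSFlags{14}");
    static_assert(RISCVII::HasVLOpShift == 15,
                  "HasVLOp expected at TSFlags{15}");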

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index ac179619db61..8f494d54ee64 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -330,20 +330,13 @@ def InvalidIndex : CONST8b<0x80>;
 class RISCVVPseudo {
   Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
   Instruction BaseInstr;
-  bits<8> VLIndex = InvalidIndex.V;
-  bits<8> SEWIndex = InvalidIndex.V;
-  bits<8> MergeOpIndex = InvalidIndex.V;
-  bits<3> VLMul;
-  bit HasDummyMask = 0;
-  bit WritesElement0 = 0;
 }
 
 // The actual table.
 def RISCVVPseudosTable : GenericTable {
   let FilterClass = "RISCVVPseudo";
   let CppTypeName = "PseudoInfo";
-  let Fields = [ "Pseudo", "BaseInstr", "VLIndex", "SEWIndex", "MergeOpIndex",
-                 "VLMul", "HasDummyMask", "WritesElement0" ];
+  let Fields = [ "Pseudo", "BaseInstr" ];
   let PrimaryKey = [ "Pseudo" ];
   let PrimaryKeyName = "getPseudoInfo";
 }
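
With the table slimmed down, a lookup now only answers which real V instruction
backs a given pseudo. A sketch of what a consumer still gets from it (the
surrounding use is illustrative):

    // The generated PseudoInfo now carries just the Pseudo/BaseInstr pair.
    if (const RISCVVPseudosTable::PseudoInfo *RVV =
            RISCVVPseudosTable::getPseudoInfo(MI.getOpcode())) {
      unsigned BaseOpcode = RVV->BaseInstr; // everything else is in TSFlags
      (void)BaseOpcode;
    }
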
@@ -416,8 +409,8 @@ class VPseudoUSLoadNoMask<VReg RetClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 2;
-  let SEWIndex = 3;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -434,9 +427,9 @@ class VPseudoUSLoadMask<VReg RetClass>:
   let usesCustomInserter = 1;
   let Constraints = "$rd = $merge";
   let Uses = [VL, VTYPE];
-  let VLIndex = 4;
-  let SEWIndex = 5;
-  let MergeOpIndex = 1;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -449,8 +442,8 @@ class VPseudoSLoadNoMask<VReg RetClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 3;
-  let SEWIndex = 4;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -467,9 +460,9 @@ class VPseudoSLoadMask<VReg RetClass>:
   let usesCustomInserter = 1;
   let Constraints = "$rd = $merge";
   let Uses = [VL, VTYPE];
-  let VLIndex = 5;
-  let SEWIndex = 6;
-  let MergeOpIndex = 1;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -482,8 +475,8 @@ class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 3;
-  let SEWIndex = 4;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -500,9 +493,9 @@ class VPseudoILoadMask<VReg RetClass, VReg IdxClass>:
   let usesCustomInserter = 1;
   let Constraints = "$rd = $merge";
   let Uses = [VL, VTYPE];
-  let VLIndex = 5;
-  let SEWIndex = 6;
-  let MergeOpIndex = 1;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -515,8 +508,8 @@ class VPseudoUSStoreNoMask<VReg StClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 2;
-  let SEWIndex = 3;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -530,8 +523,8 @@ class VPseudoUSStoreMask<VReg StClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 3;
-  let SEWIndex = 4;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -544,8 +537,8 @@ class VPseudoSStoreNoMask<VReg StClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 3;
-  let SEWIndex = 4;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -559,8 +552,8 @@ class VPseudoSStoreMask<VReg StClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 4;
-  let SEWIndex = 5;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -575,8 +568,8 @@ class VPseudoUnaryNoDummyMask<VReg RetClass,
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 2;
-  let SEWIndex = 3;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -589,8 +582,8 @@ class VPseudoNullaryNoMask<VReg RegClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 1;
-  let SEWIndex = 2;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -605,9 +598,9 @@ class VPseudoNullaryMask<VReg RegClass>:
   let usesCustomInserter = 1;
   let Constraints ="$rd = $merge";
   let Uses = [VL, VTYPE];
-  let VLIndex = 3;
-  let SEWIndex = 4;
-  let MergeOpIndex = 1;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -621,8 +614,8 @@ class VPseudoNullaryPseudoM<string BaseInst>
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 1;
-  let SEWIndex = 2;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   // BaseInstr is not used in RISCVExpandPseudoInsts pass.
   // Just fill a corresponding real v-inst to pass tablegen check.
   let BaseInstr = !cast<Instruction>(BaseInst);
@@ -639,8 +632,8 @@ class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint =
   let usesCustomInserter = 1;
   let Constraints = Constraint;
   let Uses = [VL, VTYPE];
-  let VLIndex = 2;
-  let SEWIndex = 3;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -656,9 +649,9 @@ class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
   let usesCustomInserter = 1;
   let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
   let Uses = [VL, VTYPE];
-  let VLIndex = 4;
-  let SEWIndex = 5;
-  let MergeOpIndex = 1;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -672,8 +665,8 @@ class VPseudoMaskUnarySOutMask:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 3;
-  let SEWIndex = 4;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -688,9 +681,9 @@ class VPseudoUnaryMOutMask:
   let usesCustomInserter = 1;
   let Constraints = "$rd = $merge";
   let Uses = [VL, VTYPE];
-  let VLIndex = 4;
-  let SEWIndex = 5;
-  let MergeOpIndex = 1;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -709,9 +702,9 @@ class VPseudoUnaryAnyMask<VReg RetClass,
   let usesCustomInserter = 1;
   let Constraints = "@earlyclobber $rd, $rd = $merge";
   let Uses = [VL, VTYPE];
-  let VLIndex = 4;
-  let SEWIndex = 5;
-  let MergeOpIndex = 1;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -728,8 +721,8 @@ class VPseudoBinaryNoMask<VReg RetClass,
   let usesCustomInserter = 1;
   let Constraints = Constraint;
   let Uses = [VL, VTYPE];
-  let VLIndex = 3;
-  let SEWIndex = 4;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -743,8 +736,8 @@ class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 3;
-  let SEWIndex = 4;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -758,8 +751,8 @@ class VPseudoIStoreMask<VReg StClass, VReg IdxClass>:
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
-  let VLIndex = 4;
-  let SEWIndex = 5;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -778,9 +771,9 @@ class VPseudoBinaryMask<VReg RetClass,
   let usesCustomInserter = 1;
   let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
   let Uses = [VL, VTYPE];
-  let VLIndex = 5;
-  let SEWIndex = 6;
-  let MergeOpIndex = 1;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -802,9 +795,9 @@ class VPseudoBinaryCarryIn<VReg RetClass,
   let usesCustomInserter = 1;
   let Constraints = Constraint;
   let Uses = [VL, VTYPE];
-  let VLIndex = !if(CarryIn, 4, 3);
-  let SEWIndex = !if(CarryIn, 5, 4);
-  let MergeOpIndex = InvalidIndex.V;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 0;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
   let VLMul = MInfo.value;
 }
@@ -824,9 +817,9 @@ class VPseudoTernaryNoMask<VReg RetClass,
   let usesCustomInserter = 1;
   let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
   let Uses = [VL, VTYPE];
-  let VLIndex = 4;
-  let SEWIndex = 5;
-  let MergeOpIndex = 1;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
   let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
@@ -2865,11 +2858,11 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
     Uses = [VL, VTYPE] in {
   foreach m = MxList.m in {
     let VLMul = m.value in {
-      let SEWIndex = 2, BaseInstr = VMV_X_S in
+      let HasSEWOp = 1, BaseInstr = VMV_X_S in
       def PseudoVMV_X_S # "_" # m.MX: Pseudo<(outs GPR:$rd),
                                              (ins m.vrclass:$rs2, ixlenimm:$sew),
                                              []>, RISCVVPseudo;
-      let VLIndex = 3, SEWIndex = 4, BaseInstr = VMV_S_X, WritesElement0 = 1,
+      let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X, WritesElement0 = 1,
           Constraints = "$rd = $rs1" in
       def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                              (ins m.vrclass:$rs1, GPR:$rs2,
@@ -2889,12 +2882,12 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
     Uses = [VL, VTYPE] in {
   foreach m = MxList.m in {
     let VLMul = m.value in {
-      let SEWIndex = 2, BaseInstr = VFMV_F_S in
+      let HasSEWOp = 1, BaseInstr = VFMV_F_S in
       def PseudoVFMV_F_S # "_" # m.MX : Pseudo<(outs FPR32:$rd),
                                                (ins m.vrclass:$rs2,
                                                     ixlenimm:$sew),
                                                []>, RISCVVPseudo;
-      let VLIndex = 3, SEWIndex = 4, BaseInstr = VFMV_S_F, WritesElement0 = 1,
+      let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, WritesElement0 = 1,
           Constraints = "$rd = $rs1" in
       def PseudoVFMV_S_F # "_" # m.MX : Pseudo<(outs m.vrclass:$rd),
                                                (ins m.vrclass:$rs1, FPR32:$rs2,

diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
index a93a1e38c656..8483ec381014 100644
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -147,15 +147,25 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
       MF->getSubtarget<RISCVSubtarget>().getRegisterInfo();
   assert(TRI && "TargetRegisterInfo expected");
 
+  uint64_t TSFlags = MI->getDesc().TSFlags;
+  int NumOps = MI->getNumExplicitOperands();
+
   for (const MachineOperand &MO : MI->explicit_operands()) {
     int OpNo = (int)MI->getOperandNo(&MO);
     assert(OpNo >= 0 && "Operand number doesn't fit in an 'int' type");
 
-    // Skip VL, SEW and MergeOp operands
-    if (OpNo == RVV->getVLIndex() || OpNo == RVV->getSEWIndex() ||
-        OpNo == RVV->getMergeOpIndex())
+    // Skip VL and SEW operands which are the last two operands if present.
+    if ((TSFlags & RISCVII::HasVLOpMask) && OpNo == (NumOps - 2))
+      continue;
+    if ((TSFlags & RISCVII::HasSEWOpMask) && OpNo == (NumOps - 1))
       continue;
 
+    // Skip merge op. It should be the first operand after the result.
+    if ((TSFlags & RISCVII::HasMergeOpMask) && OpNo == 1) {
+      assert(MI->getNumExplicitDefs() == 1);
+      continue;
+    }
+
     MCOperand MCOp;
     switch (MO.getType()) {
     default:
@@ -182,7 +192,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
 
   // Unmasked pseudo instructions need to append dummy mask operand to
   // V instructions. All V instructions are modeled as the masked version.
-  if (RVV->hasDummyMask())
+  if (TSFlags & RISCVII::HasDummyMaskOpMask)
     OutMI.addOperand(MCOperand::createReg(RISCV::NoRegister));
 
   return true;
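
The skipping logic above leans on a fixed explicit-operand layout for the RVV
pseudos: result, optional merge, regular sources, then optional VL and SEW at
the end. A sketch of that convention as predicates (the helper names are
illustrative, not part of the patch):

    static bool isMergeOperand(uint64_t TSFlags, unsigned OpNo) {
      // The merge operand, when present, is the first operand after the result.
      return (TSFlags & RISCVII::HasMergeOpMask) && OpNo == 1;
    }
    static bool isVLOrSEWOperand(uint64_t TSFlags, unsigned OpNo,
                                 unsigned NumOps) {
      // VL and SEW, when present, are the last two explicit operands.
      return ((TSFlags & RISCVII::HasVLOpMask) && OpNo == NumOps - 2) ||
             ((TSFlags & RISCVII::HasSEWOpMask) && OpNo == NumOps - 1);
    }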

diff --git a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
index a6162ef5f852..5bc2ba66ebb9 100644
--- a/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/Utils/RISCVBaseInfo.h
@@ -49,6 +49,33 @@ enum {
 
   ConstraintShift = 5,
   ConstraintMask = 0b111 << ConstraintShift,
+
+  VLMulShift = ConstraintShift + 3,
+  VLMulMask = 0b111 << VLMulShift,
+
+  // Do we need to add a dummy mask op when converting RVV Pseudo to MCInst.
+  HasDummyMaskOpShift = VLMulShift + 3,
+  HasDummyMaskOpMask = 1 << HasDummyMaskOpShift,
+
+  // Does this instruction only update element 0 of the destination register.
+  WritesElement0Shift = HasDummyMaskOpShift + 1,
+  WritesElement0Mask = 1 << WritesElement0Shift,
+
+  // Does this instruction have a merge operand that must be removed when
+  // converting to MCInst. It will be the first explicit use operand. Used by
+  // RVV Pseudos.
+  HasMergeOpShift = WritesElement0Shift + 1,
+  HasMergeOpMask = 1 << HasMergeOpShift,
+
+  // Does this instruction have a SEW operand. It will be the last explicit
+  // operand. Used by RVV Pseudos.
+  HasSEWOpShift = HasMergeOpShift + 1,
+  HasSEWOpMask = 1 << HasSEWOpShift,
+
+  // Does this instruction have a VL operand. It will be the second to last
+  // explicit operand. Used by RVV Pseudos.
+  HasVLOpShift = HasSEWOpShift + 1,
+  HasVLOpMask = 1 << HasVLOpShift,
 };
 
 // Match with the definitions in RISCVInstrFormatsV.td
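
The new masks are tested inline at each use site in this patch; thin wrappers
such as the following would be equivalent (the helper names are hypothetical,
not added here):

    namespace RISCVII {
    static inline bool hasSEWOp(uint64_t TSFlags) {
      return TSFlags & HasSEWOpMask;
    }
    static inline bool hasVLOp(uint64_t TSFlags) {
      return TSFlags & HasVLOpMask;
    }
    static inline bool hasMergeOp(uint64_t TSFlags) {
      return TSFlags & HasMergeOpMask;
    }
    } // namespace RISCVII
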
@@ -373,22 +400,6 @@ static const uint8_t InvalidIndex = 0x80;
 struct PseudoInfo {
   uint16_t Pseudo;
   uint16_t BaseInstr;
-  uint8_t VLIndex;
-  uint8_t SEWIndex;
-  uint8_t MergeOpIndex;
-  uint8_t VLMul;
-  bool HasDummyMask;
-  bool WritesElement0;
-
-  int getVLIndex() const { return static_cast<int8_t>(VLIndex); }
-
-  int getSEWIndex() const { return static_cast<int8_t>(SEWIndex); }
-
-  int getMergeOpIndex() const { return static_cast<int8_t>(MergeOpIndex); }
-
-  bool hasDummyMask() const { return HasDummyMask; }
-
-  bool writesElement0() const { return WritesElement0; }
 };
 
 using namespace RISCV;

