[llvm] 9b92ae0 - [RISCV] Store Log2 of EEW in the vector load/store intrinsic to pseudo lookup tables. NFCI
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 7 15:48:32 PDT 2021
Author: Craig Topper
Date: 2021-06-07T15:47:45-07:00
New Revision: 9b92ae01ee57e332e52d14c073fa3235498a9f2c
URL: https://github.com/llvm/llvm-project/commit/9b92ae01ee57e332e52d14c073fa3235498a9f2c
DIFF: https://github.com/llvm/llvm-project/commit/9b92ae01ee57e332e52d14c073fa3235498a9f2c.diff
LOG: [RISCV] Store Log2 of EEW in the vector load/store intrinsic to pseudo lookup tables. NFCI
This needs only 3 bits of data instead of 7, since SEW is always a power of
two between 8 and 64. I'm wondering if we can use bitfields for the lookup
table key where this would matter.

I also renamed the shift_amount template to log2, since it is now used for
more than just an !srl.
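
As a rough sketch of why 3 bits suffice (illustration only, not part of the
patch): SEW is one of 8, 16, 32, or 64, so its base-2 log is in [3, 6]. The
helper name encodeSEW below is made up; Log2_32 is the existing helper from
llvm/Support/MathExtras.h that the patch calls, and isPowerOf2_32 is another
real MathExtras helper.

  #include "llvm/Support/MathExtras.h"
  #include <cassert>

  // Hypothetical helper: map an element width to the 3-bit value the
  // lookup tables now store (bits<3> Log2SEW instead of bits<7> SEW).
  static unsigned encodeSEW(unsigned SEW) {
    assert(llvm::isPowerOf2_32(SEW) && SEW >= 8 && SEW <= 64 && "bad SEW");
    return llvm::Log2_32(SEW); // 8 -> 3, 16 -> 4, 32 -> 5, 64 -> 6
  }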
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index df234d3c1358e..646e11bae7823 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -192,8 +192,9 @@ static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
}
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
- SDNode *Node, unsigned SEW, const SDLoc &DL, unsigned CurOp, bool IsMasked,
- bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands, MVT *IndexVT) {
+ SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
+ bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
+ MVT *IndexVT) {
SDValue Chain = Node->getOperand(0);
SDValue Glue;
@@ -219,7 +220,7 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
Operands.push_back(VL);
MVT XLenVT = Subtarget->getXLenVT();
- SDValue SEWOp = CurDAG->getTargetConstant(Log2_32(SEW), DL, XLenVT);
+ SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
Operands.push_back(SEWOp);
Operands.push_back(Chain); // Chain.
@@ -232,7 +233,7 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
SDLoc DL(Node);
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
@@ -245,11 +246,11 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
CurOp += NF;
}
- addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
const RISCV::VLSEGPseudo *P =
- RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, ScalarSize,
+ RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
@@ -273,7 +274,7 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
MVT VT = Node->getSimpleValueType(0);
MVT XLenVT = Subtarget->getXLenVT();
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
@@ -286,12 +287,12 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
CurOp += NF;
}
- addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ false, Operands);
const RISCV::VLSEGPseudo *P =
RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
- ScalarSize, static_cast<unsigned>(LMUL));
+ Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
MVT::Other, MVT::Glue, Operands);
SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
@@ -317,7 +318,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
SDLoc DL(Node);
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
unsigned CurOp = 2;
@@ -331,16 +332,16 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
}
MVT IndexVT;
- addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands, &IndexVT);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
- unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+ unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
- NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
+ NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
@@ -368,7 +369,7 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
if (IsMasked)
NF--;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
@@ -377,11 +378,11 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
Operands.push_back(StoreVal);
unsigned CurOp = 2 + NF;
- addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked, IsStrided,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
- NF, IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
+ NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
@@ -398,7 +399,7 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
if (IsMasked)
--NF;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
@@ -408,16 +409,16 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
unsigned CurOp = 2 + NF;
MVT IndexVT;
- addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands, &IndexVT);
assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
"Element count mismatch");
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
- unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+ unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
- NF, IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
+ NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
@@ -857,7 +858,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
IntNo == Intrinsic::riscv_vloxei_mask;
MVT VT = Node->getSimpleValueType(0);
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
@@ -865,7 +866,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
Operands.push_back(Node->getOperand(CurOp++));
MVT IndexVT;
- addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands,
&IndexVT);
@@ -874,9 +875,9 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
- unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+ unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
- IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
+ IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
@@ -898,21 +899,21 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
MVT VT = Node->getSimpleValueType(0);
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2EEW = Log2_32(VT.getScalarSizeInBits());
// VLE1 uses an SEW of 8.
- unsigned SEW = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
+ unsigned Log2SEW = (IntNo == Intrinsic::riscv_vle1) ? 3 : Log2EEW;
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
if (IsMasked)
Operands.push_back(Node->getOperand(CurOp++));
- addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
- RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, ScalarSize,
+ RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2EEW,
static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
@@ -928,20 +929,20 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
MVT VT = Node->getSimpleValueType(0);
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 7> Operands;
if (IsMasked)
Operands.push_back(Node->getOperand(CurOp++));
- addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ false, Operands);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P =
- RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
- ScalarSize, static_cast<unsigned>(LMUL));
+ RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
+ static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
MVT::Other, MVT::Glue, Operands);
@@ -1049,14 +1050,14 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
IntNo == Intrinsic::riscv_vsoxei_mask;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
Operands.push_back(Node->getOperand(CurOp++)); // Store value.
MVT IndexVT;
- addVectorLoadStoreOperands(Node, ScalarSize, DL, CurOp, IsMasked,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
/*IsStridedOrIndexed*/ true, Operands,
&IndexVT);
@@ -1065,9 +1066,9 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
- unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+ unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
- IsMasked, IsOrdered, IndexScalarSize, static_cast<unsigned>(LMUL),
+ IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
@@ -1089,20 +1090,20 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
- unsigned ScalarSize = VT.getScalarSizeInBits();
+ unsigned Log2EEW = Log2_32(VT.getScalarSizeInBits());
// VSE1 uses an SEW of 8.
- unsigned SEW = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
+ unsigned Log2SEW = (IntNo == Intrinsic::riscv_vse1) ? 3 : Log2EEW;
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
Operands.push_back(Node->getOperand(CurOp++)); // Store value.
- addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
+ addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
- IsMasked, IsStrided, ScalarSize, static_cast<unsigned>(LMUL));
+ IsMasked, IsStrided, Log2EEW, static_cast<unsigned>(LMUL));
MachineSDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
@@ -1242,8 +1243,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
SDValue VL;
selectVLOp(Node->getOperand(1), VL);
- unsigned ScalarSize = VT.getScalarSizeInBits();
- SDValue SEW = CurDAG->getTargetConstant(Log2_32(ScalarSize), DL, XLenVT);
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+ SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
SDValue Operands[] = {Ld->getBasePtr(),
CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
@@ -1251,7 +1252,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
- /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, ScalarSize,
+ /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
static_cast<unsigned>(LMUL));
MachineSDNode *Load =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 0b4f2f428d328..70480d14eba54 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -98,7 +98,7 @@ struct VLSEGPseudo {
uint8_t Masked;
uint8_t Strided;
uint8_t FF;
- uint8_t SEW;
+ uint8_t Log2SEW;
uint8_t LMUL;
uint16_t Pseudo;
};
@@ -107,7 +107,7 @@ struct VLXSEGPseudo {
uint8_t NF;
uint8_t Masked;
uint8_t Ordered;
- uint8_t SEW;
+ uint8_t Log2SEW;
uint8_t LMUL;
uint8_t IndexLMUL;
uint16_t Pseudo;
@@ -117,7 +117,7 @@ struct VSSEGPseudo {
uint8_t NF;
uint8_t Masked;
uint8_t Strided;
- uint8_t SEW;
+ uint8_t Log2SEW;
uint8_t LMUL;
uint16_t Pseudo;
};
@@ -126,7 +126,7 @@ struct VSXSEGPseudo {
uint8_t NF;
uint8_t Masked;
uint8_t Ordered;
- uint8_t SEW;
+ uint8_t Log2SEW;
uint8_t LMUL;
uint8_t IndexLMUL;
uint16_t Pseudo;
@@ -136,7 +136,7 @@ struct VLEPseudo {
uint8_t Masked;
uint8_t Strided;
uint8_t FF;
- uint8_t SEW;
+ uint8_t Log2SEW;
uint8_t LMUL;
uint16_t Pseudo;
};
@@ -144,7 +144,7 @@ struct VLEPseudo {
struct VSEPseudo {
uint8_t Masked;
uint8_t Strided;
- uint8_t SEW;
+ uint8_t Log2SEW;
uint8_t LMUL;
uint16_t Pseudo;
};
@@ -152,7 +152,7 @@ struct VSEPseudo {
struct VLX_VSXPseudo {
uint8_t Masked;
uint8_t Ordered;
- uint8_t SEW;
+ uint8_t Log2SEW;
uint8_t LMUL;
uint8_t IndexLMUL;
uint16_t Pseudo;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index e597d5ca17892..a3ab02ed0e9cb 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -119,8 +119,8 @@ class NFSet<LMULInfo m> {
true: [2, 3, 4, 5, 6, 7, 8]);
}
-class shift_amount<int num> {
- int val = !if(!eq(num, 1), 0, !add(1, shift_amount<!srl(num, 1)>.val));
+class log2<int num> {
+ int val = !if(!eq(num, 1), 0, !add(1, log2<!srl(num, 1)>.val));
}
class octuple_to_str<int octuple> {
@@ -159,7 +159,7 @@ class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
ValueType Vector = Vec;
ValueType Mask = Mas;
int SEW = Sew;
- int Log2SEW = shift_amount<Sew>.val;
+ int Log2SEW = log2<Sew>.val;
VReg RegClass = Reg;
LMULInfo LMul = M;
ValueType Scalar = Scal;
@@ -423,11 +423,11 @@ def RISCVVIntrinsicsTable : GenericTable {
let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
-class RISCVVLE<bit M, bit Str, bit F, bits<7> S, bits<3> L> {
+class RISCVVLE<bit M, bit Str, bit F, bits<3> S, bits<3> L> {
bits<1> Masked = M;
bits<1> Strided = Str;
bits<1> FF = F;
- bits<7> SEW = S;
+ bits<3> Log2SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
@@ -435,15 +435,15 @@ class RISCVVLE<bit M, bit Str, bit F, bits<7> S, bits<3> L> {
def RISCVVLETable : GenericTable {
let FilterClass = "RISCVVLE";
let CppTypeName = "VLEPseudo";
- let Fields = ["Masked", "Strided", "FF", "SEW", "LMUL", "Pseudo"];
- let PrimaryKey = ["Masked", "Strided", "FF", "SEW", "LMUL"];
+ let Fields = ["Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
+ let PrimaryKey = ["Masked", "Strided", "FF", "Log2SEW", "LMUL"];
let PrimaryKeyName = "getVLEPseudo";
}
-class RISCVVSE<bit M, bit Str, bits<7> S, bits<3> L> {
+class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
bits<1> Masked = M;
bits<1> Strided = Str;
- bits<7> SEW = S;
+ bits<3> Log2SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
@@ -451,29 +451,29 @@ class RISCVVSE<bit M, bit Str, bits<7> S, bits<3> L> {
def RISCVVSETable : GenericTable {
let FilterClass = "RISCVVSE";
let CppTypeName = "VSEPseudo";
- let Fields = ["Masked", "Strided", "SEW", "LMUL", "Pseudo"];
- let PrimaryKey = ["Masked", "Strided", "SEW", "LMUL"];
+ let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
+ let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
let PrimaryKeyName = "getVSEPseudo";
}
-class RISCVVLX_VSX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+class RISCVVLX_VSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
bits<1> Masked = M;
bits<1> Ordered = O;
- bits<7> SEW = S;
+ bits<3> Log2SEW = S;
bits<3> LMUL = L;
bits<3> IndexLMUL = IL;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
-class RISCVVLX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> :
+class RISCVVLX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
RISCVVLX_VSX<M, O, S, L, IL>;
-class RISCVVSX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> :
+class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
RISCVVLX_VSX<M, O, S, L, IL>;
class RISCVVLX_VSXTable : GenericTable {
let CppTypeName = "VLX_VSXPseudo";
- let Fields = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
- let PrimaryKey = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+ let Fields = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
+ let PrimaryKey = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
}
def RISCVVLXTable : RISCVVLX_VSXTable {
@@ -486,12 +486,12 @@ def RISCVVSXTable : RISCVVLX_VSXTable {
let PrimaryKeyName = "getVSXPseudo";
}
-class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<7> S, bits<3> L> {
+class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<3> S, bits<3> L> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Strided = Str;
bits<1> FF = F;
- bits<7> SEW = S;
+ bits<3> Log2SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
@@ -499,16 +499,16 @@ class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<7> S, bits<3> L> {
def RISCVVLSEGTable : GenericTable {
let FilterClass = "RISCVVLSEG";
let CppTypeName = "VLSEGPseudo";
- let Fields = ["NF", "Masked", "Strided", "FF", "SEW", "LMUL", "Pseudo"];
- let PrimaryKey = ["NF", "Masked", "Strided", "FF", "SEW", "LMUL"];
+ let Fields = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
+ let PrimaryKey = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL"];
let PrimaryKeyName = "getVLSEGPseudo";
}
-class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Ordered = O;
- bits<7> SEW = S;
+ bits<3> Log2SEW = S;
bits<3> LMUL = L;
bits<3> IndexLMUL = IL;
Pseudo Pseudo = !cast<Pseudo>(NAME);
@@ -517,16 +517,16 @@ class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
def RISCVVLXSEGTable : GenericTable {
let FilterClass = "RISCVVLXSEG";
let CppTypeName = "VLXSEGPseudo";
- let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
- let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+ let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
+ let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
let PrimaryKeyName = "getVLXSEGPseudo";
}
-class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<7> S, bits<3> L> {
+class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Strided = Str;
- bits<7> SEW = S;
+ bits<3> Log2SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
@@ -534,16 +534,16 @@ class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<7> S, bits<3> L> {
def RISCVVSSEGTable : GenericTable {
let FilterClass = "RISCVVSSEG";
let CppTypeName = "VSSEGPseudo";
- let Fields = ["NF", "Masked", "Strided", "SEW", "LMUL", "Pseudo"];
- let PrimaryKey = ["NF", "Masked", "Strided", "SEW", "LMUL"];
+ let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
+ let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
let PrimaryKeyName = "getVSSEGPseudo";
}
-class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Ordered = O;
- bits<7> SEW = S;
+ bits<3> Log2SEW = S;
bits<3> LMUL = L;
bits<3> IndexLMUL = IL;
Pseudo Pseudo = !cast<Pseudo>(NAME);
@@ -552,8 +552,8 @@ class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
def RISCVVSXSEGTable : GenericTable {
let FilterClass = "RISCVVSXSEG";
let CppTypeName = "VSXSEGPseudo";
- let Fields = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
- let PrimaryKey = ["NF", "Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+ let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
+ let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
let PrimaryKeyName = "getVSXSEGPseudo";
}
@@ -624,11 +624,11 @@ class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
let VLMul = m.value;
}
-class VPseudoUSLoadNoMask<VReg RetClass, bits<7> EEW, bit isFF> :
+class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
+ RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -638,13 +638,13 @@ class VPseudoUSLoadNoMask<VReg RetClass, bits<7> EEW, bit isFF> :
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSLoadMask<VReg RetClass, bits<7> EEW, bit isFF> :
+class VPseudoUSLoadMask<VReg RetClass, int EEW, bit isFF> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
GPR:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
+ RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -655,11 +655,11 @@ class VPseudoUSLoadMask<VReg RetClass, bits<7> EEW, bit isFF> :
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSLoadNoMask<VReg RetClass, bits<7> EEW>:
+class VPseudoSLoadNoMask<VReg RetClass, int EEW>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, EEW, VLMul> {
+ RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -669,13 +669,13 @@ class VPseudoSLoadNoMask<VReg RetClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSLoadMask<VReg RetClass, bits<7> EEW>:
+class VPseudoSLoadMask<VReg RetClass, int EEW>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
GPR:$rs1, GPR:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, EEW, VLMul> {
+ RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -686,12 +686,12 @@ class VPseudoSLoadMask<VReg RetClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered, bit EarlyClobber>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
+ RISCVVLX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -702,14 +702,14 @@ class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoILoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered, bit EarlyClobber>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
GPR:$rs1, IdxClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
+ RISCVVLX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -720,11 +720,11 @@ class VPseudoILoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSStoreNoMask<VReg StClass, bits<7> EEW>:
+class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSE</*Masked*/0, /*Strided*/0, EEW, VLMul> {
+ RISCVVSE</*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -734,11 +734,11 @@ class VPseudoUSStoreNoMask<VReg StClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSStoreMask<VReg StClass, bits<7> EEW>:
+class VPseudoUSStoreMask<VReg StClass, int EEW>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSE</*Masked*/1, /*Strided*/0, EEW, VLMul> {
+ RISCVVSE</*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -747,11 +747,11 @@ class VPseudoUSStoreMask<VReg StClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSStoreNoMask<VReg StClass, bits<7> EEW>:
+class VPseudoSStoreNoMask<VReg StClass, int EEW>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSE</*Masked*/0, /*Strided*/1, EEW, VLMul> {
+ RISCVVSE</*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -761,11 +761,11 @@ class VPseudoSStoreNoMask<VReg StClass, bits<7> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSStoreMask<VReg StClass, bits<7> EEW>:
+class VPseudoSStoreMask<VReg StClass, int EEW>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSE</*Masked*/1, /*Strided*/1, EEW, VLMul> {
+ RISCVVSE</*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -909,12 +909,12 @@ class VPseudoBinaryNoMask<VReg RetClass,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
+ RISCVVSX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -924,12 +924,12 @@ class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoIStoreMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
+ RISCVVSX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1085,7 +1085,7 @@ multiclass VPseudoAMOEI<int eew> {
foreach lmul = MxSet<sew>.m in {
defvar octuple_lmul = lmul.octuple;
// Calculate emul = eew * lmul / sew
- defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<sew>.val);
+ defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emulMX = octuple_to_str<octuple_emul>.ret;
defvar emul= !cast<LMULInfo>("V_" # emulMX);
@@ -1100,14 +1100,14 @@ multiclass VPseudoAMOEI<int eew> {
multiclass VPseudoAMO {
foreach eew = EEWList in
- defm "EI" # eew : VPseudoAMOEI<eew>;
+ defm "EI" # eew : VPseudoAMOEI<eew>;
}
-class VPseudoUSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
+class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
+ RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1117,12 +1117,12 @@ class VPseudoUSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
+class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/isFF, EEW, VLMul> {
+ RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1133,11 +1133,11 @@ class VPseudoUSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF, bit isFF>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
+class VPseudoSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, EEW, VLMul> {
+ RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
@@ -1148,12 +1148,12 @@ class VPseudoSSegLoadNoMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
+class VPseudoSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, EEW, VLMul> {
+ RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1164,12 +1164,12 @@ class VPseudoSSegLoadMask<VReg RetClass, bits<7> EEW, bits<4> NF>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLXSEG<NF, /*Masked*/0, Ordered, EEW, VLMul, LMUL> {
+ RISCVVLXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1182,13 +1182,13 @@ class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> L
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVLXSEG<NF, /*Masked*/1, Ordered, EEW, VLMul, LMUL> {
+ RISCVVLXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
@@ -1201,11 +1201,11 @@ class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMU
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
+class VPseudoUSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, EEW, VLMul> {
+ RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1215,12 +1215,12 @@ class VPseudoUSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
+class VPseudoUSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, EEW, VLMul> {
+ RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1229,11 +1229,11 @@ class VPseudoUSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
+class VPseudoSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR: $offset, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, EEW, VLMul> {
+ RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1243,12 +1243,12 @@ class VPseudoSSegStoreNoMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
+class VPseudoSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR: $offset,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, EEW, VLMul> {
+ RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1257,13 +1257,13 @@ class VPseudoSSegStoreMask<VReg ValClass, bits<7> EEW, bits<4> NF>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSXSEG<NF, /*Masked*/0, Ordered, EEW, VLMul, LMUL> {
+ RISCVVSXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1273,13 +1273,13 @@ class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3>
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
- RISCVVSXSEG<NF, /*Masked*/1, Ordered, EEW, VLMul, LMUL> {
+ RISCVVSXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
@@ -1331,7 +1331,7 @@ multiclass VPseudoILoad<bit Ordered> {
foreach lmul = MxSet<sew>.m in {
defvar octuple_lmul = lmul.octuple;
// Calculate emul = eew * lmul / sew
- defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<sew>.val);
+ defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar LInfo = lmul.MX;
defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
@@ -1391,7 +1391,7 @@ multiclass VPseudoIStore<bit Ordered> {
foreach lmul = MxSet<sew>.m in {
defvar octuple_lmul = lmul.octuple;
// Calculate emul = eew * lmul / sew
- defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<sew>.val);
+ defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar LInfo = lmul.MX;
defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
@@ -1525,7 +1525,7 @@ multiclass VPseudoBinaryV_VV_EEW<int eew, string Constraint = ""> {
foreach sew = EEWList in {
defvar octuple_lmul = m.octuple;
// emul = lmul * eew / sew
- defvar octuple_emul = !srl(!mul(octuple_lmul, eew), shift_amount<sew>.val);
+ defvar octuple_emul = !srl(!mul(octuple_lmul, eew), log2<sew>.val);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emulMX = octuple_to_str<octuple_emul>.ret;
defvar emul = !cast<LMULInfo>("V_" # emulMX);
@@ -2049,7 +2049,7 @@ multiclass VPseudoISegLoad<bit Ordered> {
foreach val_lmul = MxSet<sew>.m in {
defvar octuple_lmul = val_lmul.octuple;
// Calculate emul = eew * lmul / sew
- defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), shift_amount<sew>.val);
+ defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar ValLInfo = val_lmul.MX;
defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
@@ -2109,7 +2109,7 @@ multiclass VPseudoISegStore<bit Ordered> {
foreach val_lmul = MxSet<sew>.m in {
defvar octuple_lmul = val_lmul.octuple;
// Calculate emul = eew * lmul / sew
- defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), shift_amount<sew>.val);
+ defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar ValLInfo = val_lmul.MX;
defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
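
A note on the octuple_emul arithmetic that recurs in the multiclasses above
(illustration only; the helper name is made up): the TableGen code tracks
LMUL in eighths ("octuple" values) so fractional LMULs stay integral, and
EMUL = EEW * LMUL / SEW is computed as a multiply followed by a right shift
by log2(SEW) -- which is what the renamed log2 class provides.

  #include <cassert>

  // Hypothetical C++ mirror of:
  //   !srl(!mul(eew, octuple_lmul), log2<sew>.val)
  // Returns 8*EMUL; the multiclasses only instantiate pseudos when the
  // result is in [1, 64], i.e. EMUL between 1/8 and 8.
  static unsigned octupleEMUL(unsigned EEW, unsigned OctupleLMUL,
                              unsigned Log2SEW) {
    assert(Log2SEW >= 3 && Log2SEW <= 6 && "SEW must be 8..64");
    return (EEW * OctupleLMUL) >> Log2SEW;
  }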