[llvm] r269379 - SDAG: Implement Select instead of SelectImpl in AArch64DAGToDAGISel
Justin Bogner via llvm-commits
llvm-commits at lists.llvm.org
Thu May 12 16:10:30 PDT 2016
Author: bogner
Date: Thu May 12 18:10:30 2016
New Revision: 269379
URL: http://llvm.org/viewvc/llvm-project?rev=269379&view=rev
Log:
SDAG: Implement Select instead of SelectImpl in AArch64DAGToDAGISel
This one has a lot of code churn, but it's all mechanical and
straightforward.
- Where we were returning a node before, call ReplaceNode instead.
- Where we would return null to fall back to another selector, rename
the method to try* and return a bool for success.
- Where we were calling SelectNodeTo, just return afterwards.
Part of llvm.org/pr26808.
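[Editor's note: a minimal sketch of the before/after pattern described above, for readers following the conversion. The class name FooDAGToDAGISel, the helper trySomething, and the opcode Foo::INSTR are illustrative placeholders, not code from this patch.]

    // Illustrative names only; not code from this patch.
    // Before: SelectImpl returned the replacement node, or nullptr to
    // fall back to the generated selector.
    SDNode *FooDAGToDAGISel::SelectImpl(SDNode *N) {
      if (SDNode *Res = SelectSomething(N))
        return Res;      // caller performed the replacement
      return nullptr;    // fall back to tablegen'd selection
    }

    // After: Select returns void; helpers become try* and report success
    // with a bool, having already replaced the node themselves.
    void FooDAGToDAGISel::Select(SDNode *N) {
      if (trySomething(N))
        return;          // helper already called ReplaceNode/SelectNodeTo
      SelectCode(N);     // fall back to tablegen'd selection
    }

    bool FooDAGToDAGISel::trySomething(SDNode *N) {
      if (!matchesPattern(N))   // hypothetical predicate
        return false;
      SDValue Ops[] = {N->getOperand(0)};
      ReplaceNode(N, CurDAG->getMachineNode(Foo::INSTR, SDLoc(N),
                                            N->getValueType(0), Ops));
      return true;
    }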
Modified:
llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp?rev=269379&r1=269378&r2=269379&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp Thu May 12 18:10:30 2016
@@ -57,7 +57,7 @@ public:
return SelectionDAGISel::runOnMachineFunction(MF);
}
- SDNode *SelectImpl(SDNode *Node) override;
+ void Select(SDNode *Node) override;
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
@@ -65,8 +65,8 @@ public:
unsigned ConstraintID,
std::vector<SDValue> &OutOps) override;
- SDNode *SelectMLAV64LaneV128(SDNode *N);
- SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
+ bool tryMLAV64LaneV128(SDNode *N);
+ bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
@@ -147,28 +147,28 @@ public:
SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
const unsigned SubRegs[]);
- SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
+ void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
- SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
+ bool tryIndexedLoad(SDNode *N);
- SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
+ void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
unsigned SubRegIdx);
- SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
+ void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
unsigned SubRegIdx);
- SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
- SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
-
- SDNode *SelectBitfieldExtractOp(SDNode *N);
- SDNode *SelectBitfieldInsertOp(SDNode *N);
- SDNode *SelectBitfieldInsertInZeroOp(SDNode *N);
+ void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+
+ bool tryBitfieldExtractOp(SDNode *N);
+ bool tryBitfieldInsertOp(SDNode *N);
+ bool tryBitfieldInsertInZeroOp(SDNode *N);
- SDNode *SelectReadRegister(SDNode *N);
- SDNode *SelectWriteRegister(SDNode *N);
+ bool tryReadRegister(SDNode *N);
+ bool tryWriteRegister(SDNode *N);
// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"
@@ -453,7 +453,7 @@ static bool checkV64LaneV128(SDValue Op0
/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
-SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
+bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
SDLoc dl(N);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
@@ -468,7 +468,7 @@ SDNode *AArch64DAGToDAGISel::SelectMLAV6
if (Op1.getOpcode() != ISD::MUL ||
!checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
LaneIdx))
- return nullptr;
+ return false;
}
SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
@@ -494,10 +494,11 @@ SDNode *AArch64DAGToDAGISel::SelectMLAV6
break;
}
- return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
+ return true;
}
-SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
+bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
SDLoc dl(N);
SDValue SMULLOp0;
SDValue SMULLOp1;
@@ -505,7 +506,7 @@ SDNode *AArch64DAGToDAGISel::SelectMULLV
if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
LaneIdx))
- return nullptr;
+ return false;
SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
@@ -538,7 +539,8 @@ SDNode *AArch64DAGToDAGISel::SelectMULLV
} else
llvm_unreachable("Unrecognized intrinsic.");
- return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
+ return true;
}
/// Instructions that accept extend modifiers like UXTW expect the register
@@ -1013,8 +1015,8 @@ SDValue AArch64DAGToDAGISel::createTuple
return SDValue(N, 0);
}
-SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
- unsigned Opc, bool isExt) {
+void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
+ bool isExt) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
@@ -1031,13 +1033,13 @@ SDNode *AArch64DAGToDAGISel::SelectTable
Ops.push_back(N->getOperand(1));
Ops.push_back(RegSeq);
Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
- return CurDAG->getMachineNode(Opc, dl, VT, Ops);
+ ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}
-SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
+bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
if (LD->isUnindexed())
- return nullptr;
+ return false;
EVT VT = LD->getMemoryVT();
EVT DstVT = N->getValueType(0);
ISD::MemIndexedMode AM = LD->getAddressingMode();
@@ -1099,7 +1101,7 @@ SDNode *AArch64DAGToDAGISel::SelectIndex
} else if (VT.is128BitVector()) {
Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
} else
- return nullptr;
+ return false;
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
@@ -1110,7 +1112,6 @@ SDNode *AArch64DAGToDAGISel::SelectIndex
SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
MVT::Other, Ops);
// Either way, we're replacing the node, so tell the caller that.
- Done = true;
SDValue LoadedVal = SDValue(Res, 1);
if (InsertTo64) {
SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
@@ -1126,11 +1127,11 @@ SDNode *AArch64DAGToDAGISel::SelectIndex
ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
CurDAG->RemoveDeadNode(N);
- return nullptr;
+ return true;
}
-SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
- unsigned Opc, unsigned SubRegIdx) {
+void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
+ unsigned SubRegIdx) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
SDValue Chain = N->getOperand(0);
@@ -1148,11 +1149,10 @@ SDNode *AArch64DAGToDAGISel::SelectLoad(
ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
CurDAG->RemoveDeadNode(N);
- return nullptr;
}
-SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
- unsigned Opc, unsigned SubRegIdx) {
+void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
+ unsigned Opc, unsigned SubRegIdx) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
SDValue Chain = N->getOperand(0);
@@ -1181,11 +1181,10 @@ SDNode *AArch64DAGToDAGISel::SelectPostL
// Update the chain
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
CurDAG->RemoveDeadNode(N);
- return nullptr;
}
-SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
@@ -1197,11 +1196,11 @@ SDNode *AArch64DAGToDAGISel::SelectStore
SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
- return St;
+ ReplaceNode(N, St);
}
-SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
const EVT ResTys[] = {MVT::i64, // Type of the write back register
@@ -1218,7 +1217,7 @@ SDNode *AArch64DAGToDAGISel::SelectPostS
N->getOperand(0)}; // Chain
SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
- return St;
+ ReplaceNode(N, St);
}
namespace {
@@ -1256,8 +1255,8 @@ static SDValue NarrowVector(SDValue V128
V128Reg);
}
-SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
bool Narrow = VT.getSizeInBits() == 64;
@@ -1293,11 +1292,10 @@ SDNode *AArch64DAGToDAGISel::SelectLoadL
ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
CurDAG->RemoveDeadNode(N);
- return nullptr;
}
-SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
bool Narrow = VT.getSizeInBits() == 64;
@@ -1349,11 +1347,10 @@ SDNode *AArch64DAGToDAGISel::SelectPostL
// Update the Chain
ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
CurDAG->RemoveDeadNode(N);
- return nullptr;
}
-SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
bool Narrow = VT.getSizeInBits() == 64;
@@ -1379,11 +1376,11 @@ SDNode *AArch64DAGToDAGISel::SelectStore
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
- return St;
+ ReplaceNode(N, St);
}
-SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
- unsigned Opc) {
+void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
SDLoc dl(N);
EVT VT = N->getOperand(2)->getValueType(0);
bool Narrow = VT.getSizeInBits() == 64;
@@ -1414,7 +1411,7 @@ SDNode *AArch64DAGToDAGISel::SelectPostS
MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
- return St;
+ ReplaceNode(N, St);
}
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
@@ -1658,11 +1655,11 @@ static bool isBitfieldExtractOp(Selectio
return false;
}
-SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
+bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {
unsigned Opc, Immr, Imms;
SDValue Opd0;
if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
- return nullptr;
+ return false;
EVT VT = N->getValueType(0);
SDLoc dl(N);
@@ -1675,15 +1672,15 @@ SDNode *AArch64DAGToDAGISel::SelectBitfi
SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
- MachineSDNode *Node =
- CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
- SDValue(BFM, 0), SubReg);
- return Node;
+ ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
+ MVT::i32, SDValue(BFM, 0), SubReg));
+ return true;
}
SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
CurDAG->getTargetConstant(Imms, dl, VT)};
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ return true;
}
/// Does DstMask form a complementary pair with the mask provided by
@@ -1986,8 +1983,8 @@ static bool isBitfieldPositioningOp(Sele
// if yes, given reference arguments will be update so that one can replace
// the OR instruction with:
// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
-static SDNode *selectBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
- SelectionDAG *CurDAG) {
+static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
+ SelectionDAG *CurDAG) {
assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
SDValue Dst, Src;
@@ -1995,7 +1992,7 @@ static SDNode *selectBitfieldInsertOpFro
EVT VT = N->getValueType(0);
if (VT != MVT::i32 && VT != MVT::i64)
- return nullptr;
+ return false;
// Because of simplify-demanded-bits in DAGCombine, involved masks may not
// have the expected shape. Try to undo that.
@@ -2083,32 +2080,34 @@ static SDNode *selectBitfieldInsertOpFro
SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
CurDAG->getTargetConstant(ImmS, DL, VT)};
unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ return true;
}
- return nullptr;
+ return false;
}
-SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
+bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
if (N->getOpcode() != ISD::OR)
- return nullptr;
+ return false;
APInt NUsefulBits;
getUsefulBits(SDValue(N, 0), NUsefulBits);
// If all bits are not useful, just return UNDEF.
- if (!NUsefulBits)
- return CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF,
- N->getValueType(0));
+ if (!NUsefulBits) {
+ CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
+ return true;
+ }
- return selectBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG);
+ return tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG);
}
/// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
/// equivalent of a left shift by a constant amount followed by an and masking
/// out a contiguous set of bits.
-SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertInZeroOp(SDNode *N) {
+bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
if (N->getOpcode() != ISD::AND)
- return nullptr;
+ return false;
EVT VT = N->getValueType(0);
unsigned Opc;
@@ -2117,13 +2116,13 @@ SDNode *AArch64DAGToDAGISel::SelectBitfi
else if (VT == MVT::i64)
Opc = AArch64::UBFMXri;
else
- return nullptr;
+ return false;
SDValue Op0;
int DstLSB, Width;
if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
Op0, DstLSB, Width))
- return nullptr;
+ return false;
// ImmR is the rotate right amount.
unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
@@ -2133,7 +2132,8 @@ SDNode *AArch64DAGToDAGISel::SelectBitfi
SDLoc DL(N);
SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
CurDAG->getTargetConstant(ImmS, DL, VT)};
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ CurDAG->SelectNodeTo(N, Opc, VT, Ops);
+ return true;
}
bool
@@ -2215,17 +2215,19 @@ static int getIntOperandFromRegisterStri
// register string argument is either of the form detailed in the ALCE (the
// form described in getIntOperandsFromRegsterString) or is a named register
// known by the MRS SysReg mapper.
-SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) {
+bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
SDLoc DL(N);
int Reg = getIntOperandFromRegisterString(RegString->getString());
- if (Reg != -1)
- return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
- MVT::Other,
- CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- N->getOperand(0));
+ if (Reg != -1) {
+ ReplaceNode(N, CurDAG->getMachineNode(
+ AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ N->getOperand(0)));
+ return true;
+ }
// Use the sysreg mapper to map the remaining possible strings to the
// value for the register to be used for the instruction operand.
@@ -2234,29 +2236,34 @@ SDNode *AArch64DAGToDAGISel::SelectReadR
Reg = mapper.fromString(RegString->getString(),
Subtarget->getFeatureBits(),
IsValidSpecialReg);
- if (IsValidSpecialReg)
- return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
- MVT::Other,
- CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- N->getOperand(0));
+ if (IsValidSpecialReg) {
+ ReplaceNode(N, CurDAG->getMachineNode(
+ AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ N->getOperand(0)));
+ return true;
+ }
- return nullptr;
+ return false;
}
// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ALCE (the
// form described in getIntOperandsFromRegsterString) or is a named register
// known by the MSR SysReg mapper.
-SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
+bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
SDLoc DL(N);
int Reg = getIntOperandFromRegisterString(RegString->getString());
- if (Reg != -1)
- return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
+ if (Reg != -1) {
+ ReplaceNode(
+ N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- N->getOperand(2), N->getOperand(0));
+ N->getOperand(2), N->getOperand(0)));
+ return true;
+ }
// Check if the register was one of those allowed as the pstatefield value in
// the MSR (immediate) instruction. To accept the values allowed in the
@@ -2280,10 +2287,12 @@ SDNode *AArch64DAGToDAGISel::SelectWrite
assert(Immed < 16 && "Bad imm");
State = AArch64::MSRpstateImm4;
}
- return CurDAG->getMachineNode(State, DL, MVT::Other,
- CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- CurDAG->getTargetConstant(Immed, DL, MVT::i16),
- N->getOperand(0));
+ ReplaceNode(N, CurDAG->getMachineNode(
+ State, DL, MVT::Other,
+ CurDAG->getTargetConstant(Reg, DL, MVT::i32),
+ CurDAG->getTargetConstant(Immed, DL, MVT::i16),
+ N->getOperand(0)));
+ return true;
}
// Use the sysreg mapper to attempt to map the remaining possible strings
@@ -2294,12 +2303,15 @@ SDNode *AArch64DAGToDAGISel::SelectWrite
Subtarget->getFeatureBits(),
IsValidSpecialReg);
- if (IsValidSpecialReg)
- return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
+ if (IsValidSpecialReg) {
+ ReplaceNode(
+ N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
CurDAG->getTargetConstant(Reg, DL, MVT::i32),
- N->getOperand(2), N->getOperand(0));
+ N->getOperand(2), N->getOperand(0)));
+ return true;
+ }
- return nullptr;
+ return false;
}
/// We've got special pseudo-instructions for these
@@ -2333,7 +2345,7 @@ void AArch64DAGToDAGISel::SelectCMP_SWAP
CurDAG->RemoveDeadNode(N);
}
-SDNode *AArch64DAGToDAGISel::SelectImpl(SDNode *Node) {
+void AArch64DAGToDAGISel::Select(SDNode *Node) {
// Dump information about the Node being selected
DEBUG(errs() << "Selecting: ");
DEBUG(Node->dump(CurDAG));
@@ -2343,11 +2355,10 @@ SDNode *AArch64DAGToDAGISel::SelectImpl(
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return nullptr;
+ return;
}
// Few custom selection stuff.
- SDNode *ResNode = nullptr;
EVT VT = Node->getValueType(0);
switch (Node->getOpcode()) {
@@ -2356,45 +2367,43 @@ SDNode *AArch64DAGToDAGISel::SelectImpl(
case ISD::ATOMIC_CMP_SWAP:
SelectCMP_SWAP(Node);
- return nullptr;
+ return;
case ISD::READ_REGISTER:
- if (SDNode *Res = SelectReadRegister(Node))
- return Res;
+ if (tryReadRegister(Node))
+ return;
break;
case ISD::WRITE_REGISTER:
- if (SDNode *Res = SelectWriteRegister(Node))
- return Res;
+ if (tryWriteRegister(Node))
+ return;
break;
case ISD::ADD:
- if (SDNode *I = SelectMLAV64LaneV128(Node))
- return I;
+ if (tryMLAV64LaneV128(Node))
+ return;
break;
case ISD::LOAD: {
// Try to select as an indexed load. Fall through to normal processing
// if we can't.
- bool Done = false;
- SDNode *I = SelectIndexedLoad(Node, Done);
- if (Done)
- return I;
+ if (tryIndexedLoad(Node))
+ return;
break;
}
case ISD::SRL:
case ISD::AND:
case ISD::SRA:
- if (SDNode *I = SelectBitfieldExtractOp(Node))
- return I;
- if (SDNode *I = SelectBitfieldInsertInZeroOp(Node))
- return I;
+ if (tryBitfieldExtractOp(Node))
+ return;
+ if (tryBitfieldInsertInZeroOp(Node))
+ return;
break;
case ISD::OR:
- if (SDNode *I = SelectBitfieldInsertOp(Node))
- return I;
+ if (tryBitfieldInsertOp(Node))
+ return;
break;
case ISD::EXTRACT_VECTOR_ELT: {
@@ -2437,19 +2446,25 @@ SDNode *AArch64DAGToDAGISel::SelectImpl(
DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
DEBUG(Extract->dumpr(CurDAG));
DEBUG(dbgs() << "\n");
- return Extract.getNode();
+ ReplaceNode(Node, Extract.getNode());
+ return;
}
case ISD::Constant: {
// Materialize zero constants as copies from WZR/XZR. This allows
// the coalescer to propagate these into other instructions.
ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
if (ConstNode->isNullValue()) {
- if (VT == MVT::i32)
- return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
- AArch64::WZR, MVT::i32).getNode();
- else if (VT == MVT::i64)
- return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
- AArch64::XZR, MVT::i64).getNode();
+ if (VT == MVT::i32) {
+ SDValue New = CurDAG->getCopyFromReg(
+ CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
+ ReplaceNode(Node, New.getNode());
+ return;
+ } else if (VT == MVT::i64) {
+ SDValue New = CurDAG->getCopyFromReg(
+ CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
+ ReplaceNode(Node, New.getNode());
+ return;
+ }
}
break;
}
@@ -2464,7 +2479,8 @@ SDNode *AArch64DAGToDAGISel::SelectImpl(
SDLoc DL(Node);
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
- return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
+ CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
+ return;
}
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
@@ -2486,7 +2502,8 @@ SDNode *AArch64DAGToDAGISel::SelectImpl(
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
- return Ld;
+ ReplaceNode(Node, Ld);
+ return;
}
case Intrinsic::aarch64_stlxp:
case Intrinsic::aarch64_stxp: {
@@ -2507,208 +2524,305 @@ SDNode *AArch64DAGToDAGISel::SelectImpl(
MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
- return St;
+ ReplaceNode(Node, St);
+ return;
}
case Intrinsic::aarch64_neon_ld1x2:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld1x3:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld1x4:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld2:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld3:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld4:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld2r:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld3r:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld4r:
- if (VT == MVT::v8i8)
- return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld2lane:
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectLoadLane(Node, 2, AArch64::LD2i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectLoadLane(Node, 2, AArch64::LD2i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectLoadLane(Node, 2, AArch64::LD2i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectLoadLane(Node, 2, AArch64::LD2i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectLoadLane(Node, 2, AArch64::LD2i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectLoadLane(Node, 2, AArch64::LD2i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectLoadLane(Node, 2, AArch64::LD2i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectLoadLane(Node, 2, AArch64::LD2i64);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld3lane:
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectLoadLane(Node, 3, AArch64::LD3i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectLoadLane(Node, 3, AArch64::LD3i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectLoadLane(Node, 3, AArch64::LD3i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectLoadLane(Node, 3, AArch64::LD3i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectLoadLane(Node, 3, AArch64::LD3i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectLoadLane(Node, 3, AArch64::LD3i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectLoadLane(Node, 3, AArch64::LD3i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectLoadLane(Node, 3, AArch64::LD3i64);
+ return;
+ }
break;
case Intrinsic::aarch64_neon_ld4lane:
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectLoadLane(Node, 4, AArch64::LD4i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectLoadLane(Node, 4, AArch64::LD4i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectLoadLane(Node, 4, AArch64::LD4i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectLoadLane(Node, 4, AArch64::LD4i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectLoadLane(Node, 4, AArch64::LD4i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectLoadLane(Node, 4, AArch64::LD4i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectLoadLane(Node, 4, AArch64::LD4i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectLoadLane(Node, 4, AArch64::LD4i64);
+ return;
+ }
break;
}
} break;
@@ -2718,33 +2832,39 @@ SDNode *AArch64DAGToDAGISel::SelectImpl(
default:
break;
case Intrinsic::aarch64_neon_tbl2:
- return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
- : AArch64::TBLv16i8Two,
- false);
+ SelectTable(Node, 2,
+ VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
+ false);
+ return;
case Intrinsic::aarch64_neon_tbl3:
- return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
- : AArch64::TBLv16i8Three,
- false);
+ SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
+ : AArch64::TBLv16i8Three,
+ false);
+ return;
case Intrinsic::aarch64_neon_tbl4:
- return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
- : AArch64::TBLv16i8Four,
- false);
+ SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
+ : AArch64::TBLv16i8Four,
+ false);
+ return;
case Intrinsic::aarch64_neon_tbx2:
- return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
- : AArch64::TBXv16i8Two,
- true);
+ SelectTable(Node, 2,
+ VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
+ true);
+ return;
case Intrinsic::aarch64_neon_tbx3:
- return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
- : AArch64::TBXv16i8Three,
- true);
+ SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
+ : AArch64::TBXv16i8Three,
+ true);
+ return;
case Intrinsic::aarch64_neon_tbx4:
- return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
- : AArch64::TBXv16i8Four,
- true);
+ SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
+ : AArch64::TBXv16i8Four,
+ true);
+ return;
case Intrinsic::aarch64_neon_smull:
case Intrinsic::aarch64_neon_umull:
- if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
- return N;
+ if (tryMULLV64LaneV128(IntNo, Node))
+ return;
break;
}
break;
@@ -2757,588 +2877,827 @@ SDNode *AArch64DAGToDAGISel::SelectImpl(
default:
break;
case Intrinsic::aarch64_neon_st1x2: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 2, AArch64::ST1Twov8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 2, AArch64::ST1Twov16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 2, AArch64::ST1Twov4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 2, AArch64::ST1Twov8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 2, AArch64::ST1Twov2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 2, AArch64::ST1Twov4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 2, AArch64::ST1Twov2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 2, AArch64::ST1Twov1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 2, AArch64::ST1Twov8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 2, AArch64::ST1Twov16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 2, AArch64::ST1Twov4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 2, AArch64::ST1Twov8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 2, AArch64::ST1Twov2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 2, AArch64::ST1Twov4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 2, AArch64::ST1Twov2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 2, AArch64::ST1Twov1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st1x3: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 3, AArch64::ST1Threev8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 3, AArch64::ST1Threev16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 3, AArch64::ST1Threev4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 3, AArch64::ST1Threev8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 3, AArch64::ST1Threev2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 3, AArch64::ST1Threev4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 3, AArch64::ST1Threev2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 3, AArch64::ST1Threev1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 3, AArch64::ST1Threev8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 3, AArch64::ST1Threev16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 3, AArch64::ST1Threev4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 3, AArch64::ST1Threev8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 3, AArch64::ST1Threev2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 3, AArch64::ST1Threev4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 3, AArch64::ST1Threev2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 3, AArch64::ST1Threev1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st1x4: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 4, AArch64::ST1Fourv8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 4, AArch64::ST1Fourv16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 4, AArch64::ST1Fourv4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 4, AArch64::ST1Fourv8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 4, AArch64::ST1Fourv2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 4, AArch64::ST1Fourv4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 4, AArch64::ST1Fourv2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 4, AArch64::ST1Fourv8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 4, AArch64::ST1Fourv16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 4, AArch64::ST1Fourv4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 4, AArch64::ST1Fourv8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 4, AArch64::ST1Fourv2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 4, AArch64::ST1Fourv4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 4, AArch64::ST1Fourv2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st2: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 2, AArch64::ST2Twov8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 2, AArch64::ST2Twov16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 2, AArch64::ST2Twov4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 2, AArch64::ST2Twov8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 2, AArch64::ST2Twov2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 2, AArch64::ST2Twov4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 2, AArch64::ST2Twov2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 2, AArch64::ST1Twov1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 2, AArch64::ST2Twov8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 2, AArch64::ST2Twov16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 2, AArch64::ST2Twov4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 2, AArch64::ST2Twov8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 2, AArch64::ST2Twov2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 2, AArch64::ST2Twov4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 2, AArch64::ST2Twov2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 2, AArch64::ST1Twov1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st3: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 3, AArch64::ST3Threev8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 3, AArch64::ST3Threev16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 3, AArch64::ST3Threev4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 3, AArch64::ST3Threev8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 3, AArch64::ST3Threev2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 3, AArch64::ST3Threev4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 3, AArch64::ST3Threev2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 3, AArch64::ST1Threev1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 3, AArch64::ST3Threev8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 3, AArch64::ST3Threev16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 3, AArch64::ST3Threev4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 3, AArch64::ST3Threev8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 3, AArch64::ST3Threev2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 3, AArch64::ST3Threev4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 3, AArch64::ST3Threev2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 3, AArch64::ST1Threev1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st4: {
- if (VT == MVT::v8i8)
- return SelectStore(Node, 4, AArch64::ST4Fourv8b);
- else if (VT == MVT::v16i8)
- return SelectStore(Node, 4, AArch64::ST4Fourv16b);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectStore(Node, 4, AArch64::ST4Fourv4h);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectStore(Node, 4, AArch64::ST4Fourv8h);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectStore(Node, 4, AArch64::ST4Fourv2s);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectStore(Node, 4, AArch64::ST4Fourv4s);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectStore(Node, 4, AArch64::ST4Fourv2d);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ if (VT == MVT::v8i8) {
+ SelectStore(Node, 4, AArch64::ST4Fourv8b);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectStore(Node, 4, AArch64::ST4Fourv16b);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectStore(Node, 4, AArch64::ST4Fourv4h);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectStore(Node, 4, AArch64::ST4Fourv8h);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectStore(Node, 4, AArch64::ST4Fourv2s);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectStore(Node, 4, AArch64::ST4Fourv4s);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectStore(Node, 4, AArch64::ST4Fourv2d);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectStore(Node, 4, AArch64::ST1Fourv1d);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st2lane: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectStoreLane(Node, 2, AArch64::ST2i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectStoreLane(Node, 2, AArch64::ST2i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectStoreLane(Node, 2, AArch64::ST2i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectStoreLane(Node, 2, AArch64::ST2i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectStoreLane(Node, 2, AArch64::ST2i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectStoreLane(Node, 2, AArch64::ST2i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectStoreLane(Node, 2, AArch64::ST2i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectStoreLane(Node, 2, AArch64::ST2i64);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st3lane: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectStoreLane(Node, 3, AArch64::ST3i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectStoreLane(Node, 3, AArch64::ST3i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectStoreLane(Node, 3, AArch64::ST3i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectStoreLane(Node, 3, AArch64::ST3i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectStoreLane(Node, 3, AArch64::ST3i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectStoreLane(Node, 3, AArch64::ST3i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectStoreLane(Node, 3, AArch64::ST3i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectStoreLane(Node, 3, AArch64::ST3i64);
+ return;
+ }
break;
}
case Intrinsic::aarch64_neon_st4lane: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectStoreLane(Node, 4, AArch64::ST4i8);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectStoreLane(Node, 4, AArch64::ST4i16);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectStoreLane(Node, 4, AArch64::ST4i32);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectStoreLane(Node, 4, AArch64::ST4i64);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectStoreLane(Node, 4, AArch64::ST4i8);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectStoreLane(Node, 4, AArch64::ST4i16);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectStoreLane(Node, 4, AArch64::ST4i32);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectStoreLane(Node, 4, AArch64::ST4i64);
+ return;
+ }
break;
}
}
break;
}
case AArch64ISD::LD2post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD3post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD4post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1x2post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1x3post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1x4post: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1DUPpost: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD2DUPpost: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD3DUPpost: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD4DUPpost: {
- if (VT == MVT::v8i8)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
- else if (VT == MVT::v16i8)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
+ if (VT == MVT::v8i8) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
+ return;
+ }
break;
}
case AArch64ISD::LD1LANEpost: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::LD2LANEpost: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::LD3LANEpost: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::LD4LANEpost: {
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST2post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST3post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST4post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST1x2post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST1x3post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST1x4post: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v8i8)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
- else if (VT == MVT::v16i8)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
- else if (VT == MVT::v4i16 || VT == MVT::v4f16)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v8f16)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
- else if (VT == MVT::v2i32 || VT == MVT::v2f32)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v4f32)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
- else if (VT == MVT::v1i64 || VT == MVT::v1f64)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v2f64)
- return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
+ if (VT == MVT::v8i8) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
+ return;
+ } else if (VT == MVT::v16i8) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
+ return;
+ } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
+ return;
+ } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
+ return;
+ } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
+ SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST2LANEpost: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST3LANEpost: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
+ return;
+ }
break;
}
case AArch64ISD::ST4LANEpost: {
VT = Node->getOperand(1).getValueType();
- if (VT == MVT::v16i8 || VT == MVT::v8i8)
- return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
- else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
- VT == MVT::v8f16)
- return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
- else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
- VT == MVT::v2f32)
- return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
- else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
- VT == MVT::v1f64)
- return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
+ if (VT == MVT::v16i8 || VT == MVT::v8i8) {
+ SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
+ return;
+ } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+ VT == MVT::v8f16) {
+ SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
+ return;
+ } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32) {
+ SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
+ return;
+ } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64) {
+ SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
+ return;
+ }
break;
}
}
// Select the default instruction
- ResNode = SelectCode(Node);
-
- DEBUG(errs() << "=> ");
- if (ResNode == nullptr || ResNode == Node)
- DEBUG(Node->dump(CurDAG));
- else
- DEBUG(ResNode->dump(CurDAG));
- DEBUG(errs() << "\n");
-
- return ResNode;
+ SelectCode(Node);
}
/// createAArch64ISelDag - This pass converts a legalized DAG into a
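For reference, a minimal standalone sketch of the mechanical pattern the hunks above apply: each matched case now calls its helper for the side effect and simply returns, and unmatched nodes fall through to the default selector. The Node, selectVectorStore, and selectDefault names below are invented stand-ins for illustration only, not LLVM's real SelectionDAG API.

#include <cstdio>

// Toy stand-in for an SDNode; purely illustrative.
struct Node { int Kind; };

// Helper that performs the replacement as a side effect. Under the old
// scheme this would have returned the replacement node instead.
static void selectVectorStore(Node *N, unsigned NumVecs, const char *Opc) {
  std::printf("replacing node %d with %s (x%u)\n", N->Kind, Opc, NumVecs);
}

static void selectDefault(Node *N) {
  std::printf("falling back to table-generated selection for %d\n", N->Kind);
}

// New-style selector: void return, call the helper, then return; anything
// not matched breaks out of the switch and hits the default selection.
static void select(Node *N) {
  switch (N->Kind) {
  case 2: selectVectorStore(N, 2, "ST2"); return;
  case 3: selectVectorStore(N, 3, "ST3"); return;
  case 4: selectVectorStore(N, 4, "ST4"); return;
  default: break;
  }
  selectDefault(N);
}

int main() {
  Node A{3}, B{7};
  select(&A); // matched: the helper does the replacement and we return
  select(&B); // unmatched: falls through to the default selector
  return 0;
}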