[llvm] 860f9e5 - [NFC][X86] Reorder the registers to reduce unnecessary iterations (#70222)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 1 09:12:10 PDT 2023
Author: Shengchen Kan
Date: 2023-11-02T00:12:05+08:00
New Revision: 860f9e5170767c08a879b592c9121d35e90a320e
URL: https://github.com/llvm/llvm-project/commit/860f9e5170767c08a879b592c9121d35e90a320e
DIFF: https://github.com/llvm/llvm-project/commit/860f9e5170767c08a879b592c9121d35e90a320e.diff
LOG: [NFC][X86] Reorder the registers to reduce unnecessary iterations (#70222)
* Introduce field `PositionOrder` for class `Register` and
`RegisterTuples`
* If register A's `PositionOrder` < register B's `PositionOrder`, then A
is placed before B in the enum in X86GenRegisterInfo.inc
* The new order of registers in the enum for X86 will be
1. Registers before AVX512,
2. AVX512 registers (X/YMM16-31, ZMM0-31, K registers)
3. AMX registers (TMM)
4. APX registers (R16-R31)
* Add a new target hook `getNumSupportedRegs()` to return the number of
registers for the function (may overestimate).
* Replace `getNumRegs()` with `getNumSupportedRegs()` in LiveVariables
to eliminate iterations on unsupported registers
This patch can reduce the 0.3% instruction count regression for sqlite3
during the compile stage (O3) reported in #67702, by not iterating on APX
registers.
Added:
Modified:
llvm/include/llvm/CodeGen/LiveVariables.h
llvm/include/llvm/CodeGen/TargetRegisterInfo.h
llvm/include/llvm/TableGen/Record.h
llvm/include/llvm/Target/Target.td
llvm/lib/CodeGen/LiveVariables.cpp
llvm/lib/Target/X86/AsmParser/X86Operand.h
llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
llvm/lib/Target/X86/X86RegisterInfo.cpp
llvm/lib/Target/X86/X86RegisterInfo.h
llvm/lib/Target/X86/X86RegisterInfo.td
llvm/test/CodeGen/X86/ipra-reg-usage.ll
llvm/utils/TableGen/CodeGenRegisters.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/LiveVariables.h b/llvm/include/llvm/CodeGen/LiveVariables.h
index 9ed4c7bdf7b1771..5d7f9ff3053cad0 100644
--- a/llvm/include/llvm/CodeGen/LiveVariables.h
+++ b/llvm/include/llvm/CodeGen/LiveVariables.h
@@ -147,7 +147,7 @@ class LiveVariables : public MachineFunctionPass {
bool HandlePhysRegKill(Register Reg, MachineInstr *MI);
/// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
- void HandleRegMask(const MachineOperand&);
+ void HandleRegMask(const MachineOperand &, unsigned);
void HandlePhysRegUse(Register Reg, MachineInstr &MI);
void HandlePhysRegDef(Register Reg, MachineInstr *MI,
@@ -170,7 +170,8 @@ class LiveVariables : public MachineFunctionPass {
/// is coming from.
void analyzePHINodes(const MachineFunction& Fn);
- void runOnInstr(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs);
+ void runOnInstr(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs,
+ unsigned NumRegs);
void runOnBlock(MachineBasicBlock *MBB, unsigned NumRegs);
public:
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 5bf27e40eee8909..337fab735a09522 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -266,6 +266,11 @@ class TargetRegisterInfo : public MCRegisterInfo {
virtual ~TargetRegisterInfo();
public:
+ /// Return the number of registers for the function. (may overestimate)
+ virtual unsigned getNumSupportedRegs(const MachineFunction &) const {
+ return getNumRegs();
+ }
+
// Register numbers can represent physical registers, virtual registers, and
// sometimes stack slots. The unsigned values are divided into these ranges:
//
diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h
index 5a47a79d8f8fff9..c1b352e974eed9b 100644
--- a/llvm/include/llvm/TableGen/Record.h
+++ b/llvm/include/llvm/TableGen/Record.h
@@ -2154,6 +2154,11 @@ struct LessRecordRegister {
};
bool operator()(const Record *Rec1, const Record *Rec2) const {
+ int64_t LHSPositionOrder = Rec1->getValueAsInt("PositionOrder");
+ int64_t RHSPositionOrder = Rec2->getValueAsInt("PositionOrder");
+ if (LHSPositionOrder != RHSPositionOrder)
+ return LHSPositionOrder < RHSPositionOrder;
+
RecordParts LHSParts(StringRef(Rec1->getName()));
RecordParts RHSParts(StringRef(Rec2->getName()));
diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
index 8ad2066aa83de9e..3b1d2f45267e9e0 100644
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -205,6 +205,10 @@ class Register<string n, list<string> altNames = []> {
// isConstant - This register always holds a constant value (e.g. the zero
// register in architectures such as MIPS)
bit isConstant = false;
+
+  /// PositionOrder - Indicate to tablegen that the newly added register should
+  /// be placed at a later position, to avoid iterating over it on unsupported targets.
+ int PositionOrder = 0;
}
// RegisterWithSubRegs - This can be used to define instances of Register which
@@ -417,6 +421,10 @@ class RegisterTuples<list<SubRegIndex> Indices, list<dag> Regs,
// List of asm names for the generated tuple registers.
list<string> RegAsmNames = RegNames;
+
+  // PositionOrder - Indicate to tablegen that the newly added register should
+  // be placed at a later position, to avoid iterating over it on unsupported targets.
+ int PositionOrder = 0;
}
// RegisterCategory - This class is a list of RegisterClasses that belong to a
diff --git a/llvm/lib/CodeGen/LiveVariables.cpp b/llvm/lib/CodeGen/LiveVariables.cpp
index 6b983b6320c711f..b85526cfb380b6a 100644
--- a/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/llvm/lib/CodeGen/LiveVariables.cpp
@@ -406,11 +406,11 @@ bool LiveVariables::HandlePhysRegKill(Register Reg, MachineInstr *MI) {
return true;
}
-void LiveVariables::HandleRegMask(const MachineOperand &MO) {
+void LiveVariables::HandleRegMask(const MachineOperand &MO, unsigned NumRegs) {
// Call HandlePhysRegKill() for all live registers clobbered by Mask.
// Clobbered registers are always dead, so there is no need to use
// HandlePhysRegDef().
- for (unsigned Reg = 1, NumRegs = TRI->getNumRegs(); Reg != NumRegs; ++Reg) {
+ for (unsigned Reg = 1; Reg != NumRegs; ++Reg) {
// Skip dead regs.
if (!PhysRegDef[Reg] && !PhysRegUse[Reg])
continue;
@@ -421,7 +421,8 @@ void LiveVariables::HandleRegMask(const MachineOperand &MO) {
// This avoids needless implicit operands.
unsigned Super = Reg;
for (MCPhysReg SR : TRI->superregs(Reg))
- if ((PhysRegDef[SR] || PhysRegUse[SR]) && MO.clobbersPhysReg(SR))
+ if (SR < NumRegs && (PhysRegDef[SR] || PhysRegUse[SR]) &&
+ MO.clobbersPhysReg(SR))
Super = SR;
HandlePhysRegKill(Super, nullptr);
}
@@ -478,7 +479,8 @@ void LiveVariables::UpdatePhysRegDefs(MachineInstr &MI,
}
void LiveVariables::runOnInstr(MachineInstr &MI,
- SmallVectorImpl<unsigned> &Defs) {
+ SmallVectorImpl<unsigned> &Defs,
+ unsigned NumRegs) {
assert(!MI.isDebugOrPseudoInstr());
// Process all of the operands of the instruction...
unsigned NumOperandsToProcess = MI.getNumOperands();
@@ -527,7 +529,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI,
// Process all masked registers. (Call clobbers).
for (unsigned Mask : RegMasks)
- HandleRegMask(MI.getOperand(Mask));
+ HandleRegMask(MI.getOperand(Mask), NumRegs);
// Process all defs.
for (unsigned MOReg : DefRegs) {
@@ -539,7 +541,7 @@ void LiveVariables::runOnInstr(MachineInstr &MI,
UpdatePhysRegDefs(MI, Defs);
}
-void LiveVariables::runOnBlock(MachineBasicBlock *MBB, const unsigned NumRegs) {
+void LiveVariables::runOnBlock(MachineBasicBlock *MBB, unsigned NumRegs) {
// Mark live-in registers as live-in.
SmallVector<unsigned, 4> Defs;
for (const auto &LI : MBB->liveins()) {
@@ -556,7 +558,7 @@ void LiveVariables::runOnBlock(MachineBasicBlock *MBB, const unsigned NumRegs) {
continue;
DistanceMap.insert(std::make_pair(&MI, Dist++));
- runOnInstr(MI, Defs);
+ runOnInstr(MI, Defs, NumRegs);
}
// Handle any virtual assignments from PHI nodes which might be at the
@@ -597,7 +599,7 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
MRI = &mf.getRegInfo();
TRI = MF->getSubtarget().getRegisterInfo();
- const unsigned NumRegs = TRI->getNumRegs();
+ const unsigned NumRegs = TRI->getNumSupportedRegs(mf);
PhysRegDef.assign(NumRegs, nullptr);
PhysRegUse.assign(NumRegs, nullptr);
PHIVarInfo.resize(MF->getNumBlockIDs());
diff --git a/llvm/lib/Target/X86/AsmParser/X86Operand.h b/llvm/lib/Target/X86/AsmParser/X86Operand.h
index 4661e73c3ef8e86..641158cb351fc4f 100644
--- a/llvm/lib/Target/X86/AsmParser/X86Operand.h
+++ b/llvm/lib/Target/X86/AsmParser/X86Operand.h
@@ -357,28 +357,28 @@ struct X86Operand final : public MCParsedAsmOperand {
}
bool isMem64_RC128X() const {
- return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
+ return isMem64() && X86II::isXMMReg(Mem.IndexReg);
}
bool isMem128_RC128X() const {
- return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
+ return isMem128() && X86II::isXMMReg(Mem.IndexReg);
}
bool isMem128_RC256X() const {
- return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
+ return isMem128() && X86II::isYMMReg(Mem.IndexReg);
}
bool isMem256_RC128X() const {
- return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
+ return isMem256() && X86II::isXMMReg(Mem.IndexReg);
}
bool isMem256_RC256X() const {
- return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
+ return isMem256() && X86II::isYMMReg(Mem.IndexReg);
}
bool isMem256_RC512() const {
- return isMem256() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
+ return isMem256() && X86II::isZMMReg(Mem.IndexReg);
}
bool isMem512_RC256X() const {
- return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
+ return isMem512() && X86II::isYMMReg(Mem.IndexReg);
}
bool isMem512_RC512() const {
- return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
+ return isMem512() && X86II::isZMMReg(Mem.IndexReg);
}
bool isMem512_GR16() const {
if (!isMem512())
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 1e5a3606f33a6fc..e6db840c0802091 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -1182,11 +1182,39 @@ namespace X86II {
}
}
+ /// \returns true if the register is a XMM.
+ inline bool isXMMReg(unsigned RegNo) {
+ assert(X86::XMM15 - X86::XMM0 == 15 &&
+ "XMM0-15 registers are not continuous");
+ assert(X86::XMM31 - X86::XMM16 == 15 &&
+ "XMM16-31 registers are not continuous");
+ return (RegNo >= X86::XMM0 && RegNo <= X86::XMM15) ||
+ (RegNo >= X86::XMM16 && RegNo <= X86::XMM31);
+ }
+
+ /// \returns true if the register is a YMM.
+ inline bool isYMMReg(unsigned RegNo) {
+ assert(X86::YMM15 - X86::YMM0 == 15 &&
+ "YMM0-15 registers are not continuous");
+ assert(X86::YMM31 - X86::YMM16 == 15 &&
+ "YMM16-31 registers are not continuous");
+ return (RegNo >= X86::YMM0 && RegNo <= X86::YMM15) ||
+ (RegNo >= X86::YMM16 && RegNo <= X86::YMM31);
+ }
+
+ /// \returns true if the register is a ZMM.
+ inline bool isZMMReg(unsigned RegNo) {
+ assert(X86::ZMM31 - X86::ZMM0 == 31 && "ZMM registers are not continuous");
+ return RegNo >= X86::ZMM0 && RegNo <= X86::ZMM31;
+ }
+
/// \returns true if the MachineOperand is a x86-64 extended (r8 or
/// higher) register, e.g. r8, xmm8, xmm13, etc.
inline bool isX86_64ExtendedReg(unsigned RegNo) {
- if ((RegNo >= X86::XMM8 && RegNo <= X86::XMM31) ||
- (RegNo >= X86::YMM8 && RegNo <= X86::YMM31) ||
+ if ((RegNo >= X86::XMM8 && RegNo <= X86::XMM15) ||
+ (RegNo >= X86::XMM16 && RegNo <= X86::XMM31) ||
+ (RegNo >= X86::YMM8 && RegNo <= X86::YMM15) ||
+ (RegNo >= X86::YMM16 && RegNo <= X86::YMM31) ||
(RegNo >= X86::ZMM8 && RegNo <= X86::ZMM31))
return true;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
index 031ba9f87acbbfd..ee82faebb57e6ce 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
@@ -234,11 +234,11 @@ using namespace llvm;
CASE_AVX_INS_COMMON(Inst##SS4, , mr_Int)
static unsigned getVectorRegSize(unsigned RegNo) {
- if (X86::ZMM0 <= RegNo && RegNo <= X86::ZMM31)
+ if (X86II::isZMMReg(RegNo))
return 512;
- if (X86::YMM0 <= RegNo && RegNo <= X86::YMM31)
+ if (X86II::isYMMReg(RegNo))
return 256;
- if (X86::XMM0 <= RegNo && RegNo <= X86::XMM31)
+ if (X86II::isXMMReg(RegNo))
return 128;
if (X86::MM0 <= RegNo && RegNo <= X86::MM7)
return 64;
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 3504ca2b5743f88..4fd8b6d17e862e0 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -604,8 +604,9 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
}
}
if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
- for (unsigned n = 16; n != 32; ++n) {
- for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
+ for (unsigned n = 0; n != 16; ++n) {
+ for (MCRegAliasIterator AI(X86::XMM16 + n, this, true); AI.isValid();
+ ++AI)
Reserved.set(*AI);
}
}
@@ -616,6 +617,26 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
+unsigned X86RegisterInfo::getNumSupportedRegs(const MachineFunction &MF) const {
+ // All existing Intel CPUs that support AMX support AVX512 and all existing
+ // Intel CPUs that support APX support AMX. AVX512 implies AVX.
+ //
+ // We enumerate the registers in X86GenRegisterInfo.inc in this order:
+ //
+ // Registers before AVX512,
+ // AVX512 registers (X/YMM16-31, ZMM0-31, K registers)
+ // AMX registers (TMM)
+ // APX registers (R16-R31)
+ //
+ // and try to return the minimum number of registers supported by the target.
+
+  assert((X86::R15WH + 1 == X86::YMM0) && (X86::YMM15 + 1 == X86::K0) &&
+ (X86::K6_K7 + 1 == X86::TMMCFG) &&
+ (X86::TMM7 + 1 == X86::NUM_TARGET_REGS) &&
+ "Register number may be incorrect");
+ return X86::NUM_TARGET_REGS;
+}
+
bool X86RegisterInfo::isArgumentRegister(const MachineFunction &MF,
MCRegister Reg) const {
const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.h b/llvm/lib/Target/X86/X86RegisterInfo.h
index 0671f79676009ee..7296a5f021e4ad4 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -51,6 +51,9 @@ class X86RegisterInfo final : public X86GenRegisterInfo {
public:
explicit X86RegisterInfo(const Triple &TT);
+ /// Return the number of registers for the function.
+ unsigned getNumSupportedRegs(const MachineFunction &MF) const override;
+
// FIXME: This should be tablegen'd like getDwarfRegNum is
int getSEHRegNum(unsigned i) const;
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.td b/llvm/lib/Target/X86/X86RegisterInfo.td
index 81b7597cc8ea5c0..898a3f97e5236df 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -223,6 +223,8 @@ def XMM13: X86Reg<"xmm13", 13>, DwarfRegNum<[30, -2, -2]>;
def XMM14: X86Reg<"xmm14", 14>, DwarfRegNum<[31, -2, -2]>;
def XMM15: X86Reg<"xmm15", 15>, DwarfRegNum<[32, -2, -2]>;
+let PositionOrder = 2 in {
+// XMM16-31 registers, used by AVX-512 instructions.
def XMM16: X86Reg<"xmm16", 16>, DwarfRegNum<[67, -2, -2]>;
def XMM17: X86Reg<"xmm17", 17>, DwarfRegNum<[68, -2, -2]>;
def XMM18: X86Reg<"xmm18", 18>, DwarfRegNum<[69, -2, -2]>;
@@ -239,27 +241,51 @@ def XMM28: X86Reg<"xmm28", 28>, DwarfRegNum<[79, -2, -2]>;
def XMM29: X86Reg<"xmm29", 29>, DwarfRegNum<[80, -2, -2]>;
def XMM30: X86Reg<"xmm30", 30>, DwarfRegNum<[81, -2, -2]>;
def XMM31: X86Reg<"xmm31", 31>, DwarfRegNum<[82, -2, -2]>;
+}
// YMM0-15 registers, used by AVX instructions and
// YMM16-31 registers, used by AVX-512 instructions.
-let SubRegIndices = [sub_xmm] in {
- foreach Index = 0-31 in {
+let SubRegIndices = [sub_xmm], PositionOrder = 1 in {
+ foreach Index = 0-15 in {
+ def YMM#Index : X86Reg<"ymm"#Index, Index, [!cast<X86Reg>("XMM"#Index)]>,
+ DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
+ }
+}
+let SubRegIndices = [sub_xmm], PositionOrder = 2 in {
+ foreach Index = 16-31 in {
def YMM#Index : X86Reg<"ymm"#Index, Index, [!cast<X86Reg>("XMM"#Index)]>,
DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
}
}
+
// ZMM Registers, used by AVX-512 instructions.
-let SubRegIndices = [sub_ymm] in {
+let SubRegIndices = [sub_ymm], PositionOrder = 2 in {
foreach Index = 0-31 in {
def ZMM#Index : X86Reg<"zmm"#Index, Index, [!cast<X86Reg>("YMM"#Index)]>,
DwarfRegAlias<!cast<X86Reg>("XMM"#Index)>;
}
}
+let PositionOrder = 2 in {
+// Mask Registers, used by AVX-512 instructions.
+def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, 93, 93]>;
+def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, 94, 94]>;
+def K2 : X86Reg<"k2", 2>, DwarfRegNum<[120, 95, 95]>;
+def K3 : X86Reg<"k3", 3>, DwarfRegNum<[121, 96, 96]>;
+def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, 97, 97]>;
+def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, 98, 98]>;
+def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, 99, 99]>;
+def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, 100, 100]>;
+// Mask register pairs
+def KPAIRS : RegisterTuples<[sub_mask_0, sub_mask_1],
+ [(add K0, K2, K4, K6), (add K1, K3, K5, K7)]>;
+}
+
+// TMM registers, used by AMX instructions.
+let PositionOrder = 3 in {
// Tile config registers.
def TMMCFG: X86Reg<"tmmcfg", 0>;
-
// Tile "registers".
def TMM0: X86Reg<"tmm0", 0>;
def TMM1: X86Reg<"tmm1", 1>;
@@ -269,16 +295,7 @@ def TMM4: X86Reg<"tmm4", 4>;
def TMM5: X86Reg<"tmm5", 5>;
def TMM6: X86Reg<"tmm6", 6>;
def TMM7: X86Reg<"tmm7", 7>;
-
-// Mask Registers, used by AVX-512 instructions.
-def K0 : X86Reg<"k0", 0>, DwarfRegNum<[118, 93, 93]>;
-def K1 : X86Reg<"k1", 1>, DwarfRegNum<[119, 94, 94]>;
-def K2 : X86Reg<"k2", 2>, DwarfRegNum<[120, 95, 95]>;
-def K3 : X86Reg<"k3", 3>, DwarfRegNum<[121, 96, 96]>;
-def K4 : X86Reg<"k4", 4>, DwarfRegNum<[122, 97, 97]>;
-def K5 : X86Reg<"k5", 5>, DwarfRegNum<[123, 98, 98]>;
-def K6 : X86Reg<"k6", 6>, DwarfRegNum<[124, 99, 99]>;
-def K7 : X86Reg<"k7", 7>, DwarfRegNum<[125, 100, 100]>;
+}
// Floating point stack registers. These don't map one-to-one to the FP
// pseudo registers, but we still mark them as aliasing FP registers. That
@@ -627,10 +644,6 @@ def VK16 : RegisterClass<"X86", [v16i1], 16, (add VK8)> {let Size = 16;}
def VK32 : RegisterClass<"X86", [v32i1], 32, (add VK16)> {let Size = 32;}
def VK64 : RegisterClass<"X86", [v64i1], 64, (add VK32)> {let Size = 64;}
-// Mask register pairs
-def KPAIRS : RegisterTuples<[sub_mask_0, sub_mask_1],
- [(add K0, K2, K4, K6), (add K1, K3, K5, K7)]>;
-
def VK1PAIR : RegisterClass<"X86", [untyped], 16, (add KPAIRS)> {let Size = 32;}
def VK2PAIR : RegisterClass<"X86", [untyped], 16, (add KPAIRS)> {let Size = 32;}
def VK4PAIR : RegisterClass<"X86", [untyped], 16, (add KPAIRS)> {let Size = 32;}
diff --git a/llvm/test/CodeGen/X86/ipra-reg-usage.ll b/llvm/test/CodeGen/X86/ipra-reg-usage.ll
index 36c4d6eff001885..4d0c94125c761c9 100644
--- a/llvm/test/CodeGen/X86/ipra-reg-usage.ll
+++ b/llvm/test/CodeGen/X86/ipra-reg-usage.ll
@@ -3,7 +3,7 @@
target triple = "x86_64-unknown-unknown"
declare void @bar1()
define preserve_allcc void @foo()#0 {
-; CHECK: foo Clobbered Registers: $cs $df $ds $eflags $eip $eiz $es $esp $fpcw $fpsw $fs $fs_base $gs $gs_base $hip $hsp $ip $mxcsr $rflags $rip $riz $rsp $sp $sph $spl $ss $ssp $tmmcfg $_eflags $cr0 $cr1 $cr2 $cr3 $cr4 $cr5 $cr6 $cr7 $cr8 $cr9 $cr10 $cr11 $cr12 $cr13 $cr14 $cr15 $dr0 $dr1 $dr2 $dr3 $dr4 $dr5 $dr6 $dr7 $dr8 $dr9 $dr10 $dr11 $dr12 $dr13 $dr14 $dr15 $fp0 $fp1 $fp2 $fp3 $fp4 $fp5 $fp6 $fp7 $k0 $k1 $k2 $k3 $k4 $k5 $k6 $k7 $mm0 $mm1 $mm2 $mm3 $mm4 $mm5 $mm6 $mm7 $r11 $st0 $st1 $st2 $st3 $st4 $st5 $st6 $st7 $tmm0 $tmm1 $tmm2 $tmm3 $tmm4 $tmm5 $tmm6 $tmm7 $xmm16 $xmm17 $xmm18 $xmm19 $xmm20 $xmm21 $xmm22 $xmm23 $xmm24 $xmm25 $xmm26 $xmm27 $xmm28 $xmm29 $xmm30 $xmm31 $ymm0 $ymm1 $ymm2 $ymm3 $ymm4 $ymm5 $ymm6 $ymm7 $ymm8 $ymm9 $ymm10 $ymm11 $ymm12 $ymm13 $ymm14 $ymm15 $ymm16 $ymm17 $ymm18 $ymm19 $ymm20 $ymm21 $ymm22 $ymm23 $ymm24 $ymm25 $ymm26 $ymm27 $ymm28 $ymm29 $ymm30 $ymm31 $zmm0 $zmm1 $zmm2 $zmm3 $zmm4 $zmm5 $zmm6 $zmm7 $zmm8 $zmm9 $zmm10 $zmm11 $zmm12 $zmm13 $zmm14 $zmm15 $zmm16 $zmm17 $zmm18 $zmm19 $zmm20 $zmm21 $zmm22 $zmm23 $zmm24 $zmm25 $zmm26 $zmm27 $zmm28 $zmm29 $zmm30 $zmm31 $r11b $r11bh $r11d $r11w $r11wh $k0_k1 $k2_k3 $k4_k5 $k6_k7
+; CHECK: foo Clobbered Registers: $cs $df $ds $eflags $eip $eiz $es $esp $fpcw $fpsw $fs $fs_base $gs $gs_base $hip $hsp $ip $mxcsr $rflags $rip $riz $rsp $sp $sph $spl $ss $ssp $_eflags $cr0 $cr1 $cr2 $cr3 $cr4 $cr5 $cr6 $cr7 $cr8 $cr9 $cr10 $cr11 $cr12 $cr13 $cr14 $cr15 $dr0 $dr1 $dr2 $dr3 $dr4 $dr5 $dr6 $dr7 $dr8 $dr9 $dr10 $dr11 $dr12 $dr13 $dr14 $dr15 $fp0 $fp1 $fp2 $fp3 $fp4 $fp5 $fp6 $fp7 $mm0 $mm1 $mm2 $mm3 $mm4 $mm5 $mm6 $mm7 $r11 $st0 $st1 $st2 $st3 $st4 $st5 $st6 $st7 $r11b $r11bh $r11d $r11w $r11wh $ymm0 $ymm1 $ymm2 $ymm3 $ymm4 $ymm5 $ymm6 $ymm7 $ymm8 $ymm9 $ymm10 $ymm11 $ymm12 $ymm13 $ymm14 $ymm15 $k0 $k1 $k2 $k3 $k4 $k5 $k6 $k7 $xmm16 $xmm17 $xmm18 $xmm19 $xmm20 $xmm21 $xmm22 $xmm23 $xmm24 $xmm25 $xmm26 $xmm27 $xmm28 $xmm29 $xmm30 $xmm31 $ymm16 $ymm17 $ymm18 $ymm19 $ymm20 $ymm21 $ymm22 $ymm23 $ymm24 $ymm25 $ymm26 $ymm27 $ymm28 $ymm29 $ymm30 $ymm31 $zmm0 $zmm1 $zmm2 $zmm3 $zmm4 $zmm5 $zmm6 $zmm7 $zmm8 $zmm9 $zmm10 $zmm11 $zmm12 $zmm13 $zmm14 $zmm15 $zmm16 $zmm17 $zmm18 $zmm19 $zmm20 $zmm21 $zmm22 $zmm23 $zmm24 $zmm25 $zmm26 $zmm27 $zmm28 $zmm29 $zmm30 $zmm31 $k0_k1 $k2_k3 $k4_k5 $k6_k7 $tmmcfg $tmm0 $tmm1 $tmm2 $tmm3 $tmm4 $tmm5 $tmm6 $tmm7
call void @bar1()
call void @bar2()
ret void
diff --git a/llvm/utils/TableGen/CodeGenRegisters.cpp b/llvm/utils/TableGen/CodeGenRegisters.cpp
index 2d5f5c841a174af..d1abdb74ea4a982 100644
--- a/llvm/utils/TableGen/CodeGenRegisters.cpp
+++ b/llvm/utils/TableGen/CodeGenRegisters.cpp
@@ -1175,22 +1175,42 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
for (auto &Idx : SubRegIndices)
Idx.updateComponents(*this);
- // Read in the register definitions.
- std::vector<Record*> Regs = Records.getAllDerivedDefinitions("Register");
- llvm::sort(Regs, LessRecordRegister());
- // Assign the enumeration values.
- for (unsigned i = 0, e = Regs.size(); i != e; ++i)
- getReg(Regs[i]);
-
- // Expand tuples and number the new registers.
- std::vector<Record*> Tups =
- Records.getAllDerivedDefinitions("RegisterTuples");
-
- for (Record *R : Tups) {
- std::vector<Record *> TupRegs = *Sets.expand(R);
- llvm::sort(TupRegs, LessRecordRegister());
- for (Record *RC : TupRegs)
- getReg(RC);
+ // Read in the register and register tuple definitions.
+ std::vector<Record *> Regs = Records.getAllDerivedDefinitions("Register");
+ if (!Regs.empty() && Regs[0]->isSubClassOf("X86Reg")) {
+ // For X86, we need to sort Registers and RegisterTuples together to list
+ // new registers and register tuples at a later position. So that we can
+ // reduce unnecessary iterations on unsupported registers in LiveVariables.
+ // TODO: Remove this logic when migrate from LiveVariables to LiveIntervals
+ // completely.
+ std::vector<Record *> Tups =
+ Records.getAllDerivedDefinitions("RegisterTuples");
+ for (Record *R : Tups) {
+ // Expand tuples and merge the vectors
+ std::vector<Record *> TupRegs = *Sets.expand(R);
+ Regs.insert(Regs.end(), TupRegs.begin(), TupRegs.end());
+ }
+
+ llvm::sort(Regs, LessRecordRegister());
+ // Assign the enumeration values.
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i)
+ getReg(Regs[i]);
+ } else {
+ llvm::sort(Regs, LessRecordRegister());
+ // Assign the enumeration values.
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i)
+ getReg(Regs[i]);
+
+ // Expand tuples and number the new registers.
+ std::vector<Record *> Tups =
+ Records.getAllDerivedDefinitions("RegisterTuples");
+
+ for (Record *R : Tups) {
+ std::vector<Record *> TupRegs = *Sets.expand(R);
+ llvm::sort(TupRegs, LessRecordRegister());
+ for (Record *RC : TupRegs)
+ getReg(RC);
+ }
}
// Now all the registers are known. Build the object graph of explicit
More information about the llvm-commits
mailing list