[llvm] 6cfe41d - [X86] Rename more target feature related things for consistency. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 17 22:27:53 PDT 2022
Author: Craig Topper
Date: 2022-03-17T22:27:17-07:00
New Revision: 6cfe41dcc885f7e1f641153c63e1b2bc9441abb7
URL: https://github.com/llvm/llvm-project/commit/6cfe41dcc885f7e1f641153c63e1b2bc9441abb7
DIFF: https://github.com/llvm/llvm-project/commit/6cfe41dcc885f7e1f641153c63e1b2bc9441abb7.diff
LOG: [X86] Rename more target feature related things for consistency. NFC
-Rename Mode*Bit to Is*Bit to match X86Subtarget.
-Rename FeatureLAHFSAHF to FeatureLAHFSAHF64 to match X86Subtarget.
-Use consistent capitalization.
Reviewed By: skan
Differential Revision: https://reviews.llvm.org/D121975
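For context on the pattern being renamed: each SubtargetFeature def in X86.td
becomes an enum bit in the TableGen-generated X86 headers, and MC-layer code
tests those bits through the subtarget's feature bitset. Below is a minimal
sketch of that query pattern, assuming it compiles inside the X86 target where
the generated enum is visible; the helper name isIn16BitMode is hypothetical
and not part of this commit.

#include "llvm/MC/MCSubtargetInfo.h"

static bool isIn16BitMode(const llvm::MCSubtargetInfo &STI) {
  // X86::Mode16Bit is now spelled X86::Is16Bit, matching the X86Subtarget
  // member (Is16Bit) that the SubtargetFeature def sets to "true".
  return STI.getFeatureBits()[X86::Is16Bit];
}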
Added:
Modified:
llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
llvm/lib/Target/X86/X86.td
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/lib/Target/X86/X86InstrInfo.td
llvm/lib/Target/X86/X86Subtarget.h
llvm/lib/Target/X86/X86TargetTransformInfo.h
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 6041e4318b4c4..7ba5311e41435 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -124,12 +124,12 @@ class X86AsmParser : public MCTargetAsmParser {
bool matchingInlineAsm, unsigned VariantID = 0) {
// In Code16GCC mode, match as 32-bit.
if (Code16GCC)
- SwitchMode(X86::Mode32Bit);
+ SwitchMode(X86::Is32Bit);
unsigned rv = MatchInstructionImpl(Operands, Inst, ErrorInfo,
MissingFeatures, matchingInlineAsm,
VariantID);
if (Code16GCC)
- SwitchMode(X86::Mode16Bit);
+ SwitchMode(X86::Is16Bit);
return rv;
}
@@ -1193,19 +1193,19 @@ class X86AsmParser : public MCTargetAsmParser {
bool is64BitMode() const {
// FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[X86::Mode64Bit];
+ return getSTI().getFeatureBits()[X86::Is64Bit];
}
bool is32BitMode() const {
// FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[X86::Mode32Bit];
+ return getSTI().getFeatureBits()[X86::Is32Bit];
}
bool is16BitMode() const {
// FIXME: Can tablegen auto-generate this?
- return getSTI().getFeatureBits()[X86::Mode16Bit];
+ return getSTI().getFeatureBits()[X86::Is16Bit];
}
void SwitchMode(unsigned mode) {
MCSubtargetInfo &STI = copySTI();
- FeatureBitset AllModes({X86::Mode64Bit, X86::Mode32Bit, X86::Mode16Bit});
+ FeatureBitset AllModes({X86::Is64Bit, X86::Is32Bit, X86::Is16Bit});
FeatureBitset OldMode = STI.getFeatureBits() & AllModes;
FeatureBitset FB = ComputeAvailableFeatures(
STI.ToggleFeature(OldMode.flip(mode)));
@@ -3346,7 +3346,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Name = Next;
PatchedName = Name;
- ForcedDataPrefix = X86::Mode32Bit;
+ ForcedDataPrefix = X86::Is32Bit;
IsPrefix = false;
}
}
@@ -4313,15 +4313,15 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
// In 16-bit mode, if data32 is specified, temporarily switch to 32-bit mode
// when matching the instruction.
- if (ForcedDataPrefix == X86::Mode32Bit)
- SwitchMode(X86::Mode32Bit);
+ if (ForcedDataPrefix == X86::Is32Bit)
+ SwitchMode(X86::Is32Bit);
// First, try a direct match.
FeatureBitset MissingFeatures;
unsigned OriginalError = MatchInstruction(Operands, Inst, ErrorInfo,
MissingFeatures, MatchingInlineAsm,
isParsingIntelSyntax());
- if (ForcedDataPrefix == X86::Mode32Bit) {
- SwitchMode(X86::Mode16Bit);
+ if (ForcedDataPrefix == X86::Is32Bit) {
+ SwitchMode(X86::Is16Bit);
ForcedDataPrefix = 0;
}
switch (OriginalError) {
@@ -4886,7 +4886,7 @@ bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
if (IDVal == ".code16") {
Parser.Lex();
if (!is16BitMode()) {
- SwitchMode(X86::Mode16Bit);
+ SwitchMode(X86::Is16Bit);
getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
}
} else if (IDVal == ".code16gcc") {
@@ -4894,19 +4894,19 @@ bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
Parser.Lex();
Code16GCC = true;
if (!is16BitMode()) {
- SwitchMode(X86::Mode16Bit);
+ SwitchMode(X86::Is16Bit);
getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
}
} else if (IDVal == ".code32") {
Parser.Lex();
if (!is32BitMode()) {
- SwitchMode(X86::Mode32Bit);
+ SwitchMode(X86::Is32Bit);
getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
}
} else if (IDVal == ".code64") {
Parser.Lex();
if (!is64BitMode()) {
- SwitchMode(X86::Mode64Bit);
+ SwitchMode(X86::Is64Bit);
getParser().getStreamer().emitAssemblerFlag(MCAF_Code64);
}
} else {
diff --git a/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 908eb6d1fab13..730d3d7cdbff4 100644
--- a/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -1722,13 +1722,13 @@ X86GenericDisassembler::X86GenericDisassembler(
std::unique_ptr<const MCInstrInfo> MII)
: MCDisassembler(STI, Ctx), MII(std::move(MII)) {
const FeatureBitset &FB = STI.getFeatureBits();
- if (FB[X86::Mode16Bit]) {
+ if (FB[X86::Is16Bit]) {
fMode = MODE_16BIT;
return;
- } else if (FB[X86::Mode32Bit]) {
+ } else if (FB[X86::Is32Bit]) {
fMode = MODE_32BIT;
return;
- } else if (FB[X86::Mode64Bit]) {
+ } else if (FB[X86::Is64Bit]) {
fMode = MODE_64BIT;
return;
}
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
index 9ab0e98fbb347..6fd3db4515ecb 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
@@ -55,7 +55,7 @@ void X86ATTInstPrinter::printInst(const MCInst *MI, uint64_t Address,
// InstrInfo.td as soon as Requires clause is supported properly
// for InstAlias.
if (MI->getOpcode() == X86::CALLpcrel32 &&
- (STI.getFeatureBits()[X86::Mode64Bit])) {
+ (STI.getFeatureBits()[X86::Is64Bit])) {
OS << "\tcallq\t";
printPCRelImm(MI, Address, 0, OS);
}
@@ -65,8 +65,8 @@ void X86ATTInstPrinter::printInst(const MCInst *MI, uint64_t Address,
// 0x66 to be interpreted as "data16" by the asm printer.
// Thus we add an adjustment here in order to print the "right" instruction.
else if (MI->getOpcode() == X86::DATA16_PREFIX &&
- STI.getFeatureBits()[X86::Mode16Bit]) {
- OS << "\tdata32";
+ STI.getFeatureBits()[X86::Is16Bit]) {
+ OS << "\tdata32";
}
// Try to print any aliases first.
else if (!printAliasInstr(MI, Address, OS) && !printVecCompareInstr(MI, OS))
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 8555f8fb9ce07..89f7d53446244 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -293,7 +293,7 @@ static bool isFirstMacroFusibleInst(const MCInst &Inst,
/// - If the instruction has a ESP/EBP base register, use SS.
/// - Otherwise use DS.
uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
- assert((STI.hasFeature(X86::Mode32Bit) || STI.hasFeature(X86::Mode64Bit)) &&
+ assert((STI.hasFeature(X86::Is32Bit) || STI.hasFeature(X86::Is64Bit)) &&
"Prefixes can be added only in 32-bit or 64-bit mode.");
const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
uint64_t TSFlags = Desc.TSFlags;
@@ -334,7 +334,7 @@ uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
if (SegmentReg != 0)
return X86::getSegmentOverridePrefixForReg(SegmentReg);
- if (STI.hasFeature(X86::Mode64Bit))
+ if (STI.hasFeature(X86::Is64Bit))
return X86::CS_Encoding;
if (MemoryOperand >= 0) {
@@ -493,7 +493,7 @@ bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
return false;
// Branches only need to be aligned in 32-bit or 64-bit mode.
- if (!(STI.hasFeature(X86::Mode64Bit) || STI.hasFeature(X86::Mode32Bit)))
+ if (!(STI.hasFeature(X86::Is64Bit) || STI.hasFeature(X86::Is32Bit)))
return false;
return true;
@@ -755,7 +755,7 @@ bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
void X86AsmBackend::relaxInstruction(MCInst &Inst,
const MCSubtargetInfo &STI) const {
// The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
- bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
+ bool Is16BitMode = STI.getFeatureBits()[X86::Is16Bit];
unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
if (RelaxedOp == Inst.getOpcode()) {
@@ -774,7 +774,7 @@ void X86AsmBackend::relaxInstruction(MCInst &Inst,
static bool isFullyRelaxed(const MCRelaxableFragment &RF) {
auto &Inst = RF.getInst();
auto &STI = *RF.getSubtargetInfo();
- bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
+ bool Is16BitMode = STI.getFeatureBits()[X86::Is16Bit];
return getRelaxedOpcode(Inst, Is16BitMode) == Inst.getOpcode();
}
@@ -998,9 +998,9 @@ void X86AsmBackend::finishLayout(MCAssembler const &Asm,
}
unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
- if (STI.hasFeature(X86::Mode16Bit))
+ if (STI.hasFeature(X86::Is16Bit))
return 4;
- if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Mode64Bit))
+ if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Is64Bit))
return 1;
if (STI.getFeatureBits()[X86::TuningFast7ByteNOP])
return 7;
@@ -1055,7 +1055,7 @@ bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
};
const char(*Nops)[11] =
- STI->getFeatureBits()[X86::Mode16Bit] ? Nops16Bit : Nops32Bit;
+ STI->getFeatureBits()[X86::Is16Bit] ? Nops16Bit : Nops32Bit;
uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
index 5a99c3b2657c3..e78e98cfc09e7 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
@@ -390,9 +390,9 @@ void X86InstPrinterCommon::printInstFlags(const MCInst *MI, raw_ostream &O,
// Address-Size override prefix
if (Flags & X86::IP_HAS_AD_SIZE &&
!X86_MC::needsAddressSizeOverride(*MI, STI, MemoryOperand, TSFlags)) {
- if (STI.hasFeature(X86::Mode16Bit) || STI.hasFeature(X86::Mode64Bit))
+ if (STI.hasFeature(X86::Is16Bit) || STI.hasFeature(X86::Is64Bit))
O << "\taddr32\t";
- else if (STI.hasFeature(X86::Mode32Bit))
+ else if (STI.hasFeature(X86::Is32Bit))
O << "\taddr16\t";
}
}
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
index 5b4b0e0ca6f6c..2a2afa925a9c4 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
@@ -44,7 +44,7 @@ void X86IntelInstPrinter::printInst(const MCInst *MI, uint64_t Address,
// In 16-bit mode, print data16 as data32.
if (MI->getOpcode() == X86::DATA16_PREFIX &&
- STI.getFeatureBits()[X86::Mode16Bit]) {
+ STI.getFeatureBits()[X86::Is16Bit]) {
OS << "\tdata32";
} else if (!printAliasInstr(MI, Address, OS) && !printVecCompareInstr(MI, OS))
printInstruction(MI, Address, OS);
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 527df81562bd9..a21bb6da86de0 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -333,7 +333,7 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
// Handle %rip relative addressing.
if (BaseReg == X86::RIP ||
BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
- assert(STI.hasFeature(X86::Mode64Bit) &&
+ assert(STI.hasFeature(X86::Is64Bit) &&
"Rip-relative addressing requires 64-bit mode");
assert(IndexReg.getReg() == 0 && !ForceSIB &&
"Invalid rip-relative address");
@@ -482,7 +482,7 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
BaseRegNo != N86::ESP &&
// If there is no base register and we're in 64-bit mode, we need a SIB
// byte to emit an addr that is just 'disp32' (the non-RIP relative form).
- (!STI.hasFeature(X86::Mode64Bit) || BaseReg != 0)) {
+ (!STI.hasFeature(X86::Is64Bit) || BaseReg != 0)) {
if (BaseReg == 0) { // [disp32] in X86-32 mode
emitByte(modRMByte(0, RegOpcodeField, 5), OS);
@@ -1252,7 +1252,7 @@ bool X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI,
// Emit the operand size opcode prefix as needed.
if ((TSFlags & X86II::OpSizeMask) ==
- (STI.hasFeature(X86::Mode16Bit) ? X86II::OpSize32 : X86II::OpSize16))
+ (STI.hasFeature(X86::Is16Bit) ? X86II::OpSize32 : X86II::OpSize16))
emitByte(0x66, OS);
// Emit the LOCK opcode prefix.
@@ -1276,9 +1276,9 @@ bool X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI,
}
// Handle REX prefix.
- assert((STI.hasFeature(X86::Mode64Bit) || !(TSFlags & X86II::REX_W)) &&
+ assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) &&
"REX.W requires 64bit mode.");
- bool HasREX = STI.hasFeature(X86::Mode64Bit)
+ bool HasREX = STI.hasFeature(X86::Is64Bit)
? emitREXPrefix(MemOperand, MI, STI, OS)
: false;
@@ -1377,7 +1377,7 @@ void X86MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::RawFrm:
emitByte(BaseOpcode + OpcodeOffset, OS);
- if (!STI.hasFeature(X86::Mode64Bit) || !isPCRel32Branch(MI, MCII))
+ if (!STI.hasFeature(X86::Is64Bit) || !isPCRel32Branch(MI, MCII))
break;
const MCOperand &Op = MI.getOperand(CurOp++);
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index b2a424dadb44a..49660883ad833 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -86,7 +86,7 @@ bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
- if (STI.hasFeature(X86::Mode16Bit) && Base.isReg() && Base.getReg() == 0 &&
+ if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && Base.getReg() == 0 &&
Index.isReg() && Index.getReg() == 0)
return true;
return isMemOperand(MI, Op, X86::GR16RegClassID);
@@ -114,9 +114,9 @@ bool X86_MC::needsAddressSizeOverride(const MCInst &MI,
const MCSubtargetInfo &STI,
int MemoryOperand, uint64_t TSFlags) {
uint64_t AdSize = TSFlags & X86II::AdSizeMask;
- bool Is16BitMode = STI.hasFeature(X86::Mode16Bit);
- bool Is32BitMode = STI.hasFeature(X86::Mode32Bit);
- bool Is64BitMode = STI.hasFeature(X86::Mode64Bit);
+ bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
+ bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
+ bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
if ((Is16BitMode && AdSize == X86II::AdSize32) ||
(Is32BitMode && AdSize == X86II::AdSize16) ||
(Is64BitMode && AdSize == X86II::AdSize32))
@@ -150,15 +150,15 @@ bool X86_MC::needsAddressSizeOverride(const MCInst &MI,
if (MemoryOperand < 0)
return false;
- if (STI.hasFeature(X86::Mode64Bit)) {
+ if (STI.hasFeature(X86::Is64Bit)) {
assert(!is16BitMemOperand(MI, MemoryOperand, STI));
return is32BitMemOperand(MI, MemoryOperand);
}
- if (STI.hasFeature(X86::Mode32Bit)) {
+ if (STI.hasFeature(X86::Is32Bit)) {
assert(!is64BitMemOperand(MI, MemoryOperand));
return is16BitMemOperand(MI, MemoryOperand, STI);
}
- assert(STI.hasFeature(X86::Mode16Bit));
+ assert(STI.hasFeature(X86::Is16Bit));
assert(!is64BitMemOperand(MI, MemoryOperand));
return !is16BitMemOperand(MI, MemoryOperand, STI);
}
diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
index f9e9021d0d920..579e57f1fb79d 100644
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -19,12 +19,12 @@ include "llvm/Target/Target.td"
// X86 Subtarget state
//
-def Mode64Bit : SubtargetFeature<"64bit-mode", "Is64Bit", "true",
- "64-bit mode (x86_64)">;
-def Mode32Bit : SubtargetFeature<"32bit-mode", "Is32Bit", "true",
- "32-bit mode (80386)">;
-def Mode16Bit : SubtargetFeature<"16bit-mode", "Is16Bit", "true",
- "16-bit mode (i8086)">;
+def Is64Bit : SubtargetFeature<"64bit-mode", "Is64Bit", "true",
+ "64-bit mode (x86_64)">;
+def Is32Bit : SubtargetFeature<"32bit-mode", "Is32Bit", "true",
+ "32-bit mode (80386)">;
+def Is16Bit : SubtargetFeature<"16bit-mode", "Is16Bit", "true",
+ "16-bit mode (i8086)">;
//===----------------------------------------------------------------------===//
// X86 Subtarget ISA features
@@ -36,10 +36,10 @@ def FeatureX87 : SubtargetFeature<"x87","HasX87", "true",
def FeatureNOPL : SubtargetFeature<"nopl", "HasNOPL", "true",
"Enable NOPL instruction">;
-def FeatureCMOV : SubtargetFeature<"cmov","HasCMov", "true",
+def FeatureCMOV : SubtargetFeature<"cmov","HasCMOV", "true",
"Enable conditional move instructions">;
-def FeatureCMPXCHG8B : SubtargetFeature<"cx8", "HasCmpxchg8b", "true",
+def FeatureCMPXCHG8B : SubtargetFeature<"cx8", "HasCMPXCHG8B", "true",
"Support CMPXCHG8B instructions">;
def FeatureCRC32 : SubtargetFeature<"crc32", "HasCRC32", "true",
@@ -98,9 +98,9 @@ def Feature3DNowA : SubtargetFeature<"3dnowa", "X863DNowLevel", "ThreeDNowA",
// feature, because SSE2 can be disabled (e.g. for compiling OS kernels)
// without disabling 64-bit mode. Nothing should imply this feature bit. It
// is used to enforce that only 64-bit capable CPUs are used in 64-bit mode.
-def Feature64Bit : SubtargetFeature<"64bit", "HasX86_64", "true",
+def FeatureX86_64 : SubtargetFeature<"64bit", "HasX86_64", "true",
"Support 64-bit instructions">;
-def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCmpxchg16b", "true",
+def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCMPXCHG16B", "true",
"64-bit with cmpxchg16b",
[FeatureCMPXCHG8B]>;
def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true",
@@ -234,7 +234,7 @@ def FeaturePRFCHW : SubtargetFeature<"prfchw", "HasPRFCHW", "true",
"Support PRFCHW instructions">;
def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true",
"Support RDSEED instruction">;
-def FeatureLAHFSAHF : SubtargetFeature<"sahf", "HasLAHFSAHF64", "true",
+def FeatureLAHFSAHF64 : SubtargetFeature<"sahf", "HasLAHFSAHF64", "true",
"Support LAHF and SAHF instructions in 64-bit mode">;
def FeatureMWAITX : SubtargetFeature<"mwaitx", "HasMWAITX", "true",
"Enable MONITORX/MWAITX timer functionality">;
@@ -637,10 +637,10 @@ def ProcessorFeatures {
// x86-64 and x86-64-v[234]
list<SubtargetFeature> X86_64V1Features = [
FeatureX87, FeatureCMPXCHG8B, FeatureCMOV, FeatureMMX, FeatureSSE2,
- FeatureFXSR, FeatureNOPL, Feature64Bit
+ FeatureFXSR, FeatureNOPL, FeatureX86_64,
];
list<SubtargetFeature> X86_64V2Features = !listconcat(X86_64V1Features, [
- FeatureCMPXCHG16B, FeatureLAHFSAHF, FeatureCRC32, FeaturePOPCNT,
+ FeatureCMPXCHG16B, FeatureLAHFSAHF64, FeatureCRC32, FeaturePOPCNT,
FeatureSSE42
]);
list<SubtargetFeature> X86_64V3Features = !listconcat(X86_64V2Features, [
@@ -878,10 +878,10 @@ def ProcessorFeatures {
FeatureSSSE3,
FeatureFXSR,
FeatureNOPL,
- Feature64Bit,
+ FeatureX86_64,
FeatureCMPXCHG16B,
FeatureMOVBE,
- FeatureLAHFSAHF];
+ FeatureLAHFSAHF64];
list<SubtargetFeature> AtomTuning = [ProcIntelAtom,
TuningSlowUAMem16,
TuningLEAForSP,
@@ -983,14 +983,14 @@ def ProcessorFeatures {
FeatureMMX,
FeatureFXSR,
FeatureNOPL,
- Feature64Bit,
+ FeatureX86_64,
FeatureCMPXCHG16B,
FeatureCRC32,
FeaturePOPCNT,
FeaturePCLMUL,
FeatureXSAVE,
FeatureXSAVEOPT,
- FeatureLAHFSAHF,
+ FeatureLAHFSAHF64,
FeatureAES,
FeatureRDRAND,
FeatureF16C,
@@ -1031,9 +1031,9 @@ def ProcessorFeatures {
FeaturePRFCHW,
FeatureLZCNT,
FeaturePOPCNT,
- FeatureLAHFSAHF,
+ FeatureLAHFSAHF64,
FeatureCMOV,
- Feature64Bit];
+ FeatureX86_64];
list<SubtargetFeature> BarcelonaTuning = [TuningFastScalarShiftMasks,
TuningSlowSHLD,
TuningSBBDepBreaking,
@@ -1048,12 +1048,12 @@ def ProcessorFeatures {
FeatureSSE4A,
FeatureFXSR,
FeatureNOPL,
- Feature64Bit,
+ FeatureX86_64,
FeatureCMPXCHG16B,
FeaturePRFCHW,
FeatureLZCNT,
FeaturePOPCNT,
- FeatureLAHFSAHF];
+ FeatureLAHFSAHF64];
list<SubtargetFeature> BtVer1Tuning = [TuningFast15ByteNOP,
TuningFastScalarShiftMasks,
TuningFastVectorShiftMasks,
@@ -1088,7 +1088,7 @@ def ProcessorFeatures {
FeatureCMPXCHG8B,
FeatureCMOV,
FeatureXOP,
- Feature64Bit,
+ FeatureX86_64,
FeatureCMPXCHG16B,
FeatureAES,
FeatureCRC32,
@@ -1101,7 +1101,7 @@ def ProcessorFeatures {
FeaturePOPCNT,
FeatureXSAVE,
FeatureLWP,
- FeatureLAHFSAHF];
+ FeatureLAHFSAHF64];
list<SubtargetFeature> BdVer1Tuning = [TuningSlowSHLD,
TuningFast11ByteNOP,
TuningFastScalarShiftMasks,
@@ -1148,7 +1148,7 @@ def ProcessorFeatures {
FeatureCLFLUSHOPT,
FeatureCLZERO,
FeatureCMOV,
- Feature64Bit,
+ FeatureX86_64,
FeatureCMPXCHG16B,
FeatureCRC32,
FeatureF16C,
@@ -1156,7 +1156,7 @@ def ProcessorFeatures {
FeatureFSGSBase,
FeatureFXSR,
FeatureNOPL,
- FeatureLAHFSAHF,
+ FeatureLAHFSAHF64,
FeatureLZCNT,
FeatureMMX,
FeatureMOVBE,
@@ -1220,13 +1220,13 @@ class ProcModel<string Name, SchedMachineModel Model,
// NOTE: CMPXCHG8B is here for legacy compatibility so that it is only disabled
// if i386/i486 is specifically requested.
// NOTE: 64Bit is here as "generic" is the default llc CPU. The X86Subtarget
-// constructor checks that any CPU used in 64-bit mode has Feature64Bit enabled.
-// It has no effect on code generation.
+// constructor checks that any CPU used in 64-bit mode has FeatureX86_64
+// enabled. It has no effect on code generation.
// NOTE: As a default tuning, "generic" aims to produce code optimized for the
// most common X86 processors. The tunings might be changed over time. It is
// recommended to use "x86-64" in lit tests for consistency.
def : ProcModel<"generic", SandyBridgeModel,
- [FeatureX87, FeatureCMPXCHG8B, Feature64Bit],
+ [FeatureX87, FeatureCMPXCHG8B, FeatureX86_64],
[TuningSlow3OpsLEA,
TuningSlowDivide64,
TuningMacroFusion,
@@ -1306,7 +1306,7 @@ def : ProcModel<"nocona", GenericPostRAModel, [
FeatureSSE3,
FeatureFXSR,
FeatureNOPL,
- Feature64Bit,
+ FeatureX86_64,
FeatureCMPXCHG16B,
],
[
@@ -1323,9 +1323,9 @@ def : ProcModel<"core2", SandyBridgeModel, [
FeatureSSSE3,
FeatureFXSR,
FeatureNOPL,
- Feature64Bit,
+ FeatureX86_64,
FeatureCMPXCHG16B,
- FeatureLAHFSAHF
+ FeatureLAHFSAHF64
],
[
TuningMacroFusion,
@@ -1340,9 +1340,9 @@ def : ProcModel<"penryn", SandyBridgeModel, [
FeatureSSE41,
FeatureFXSR,
FeatureNOPL,
- Feature64Bit,
+ FeatureX86_64,
FeatureCMPXCHG16B,
- FeatureLAHFSAHF
+ FeatureLAHFSAHF64
],
[
TuningMacroFusion,
@@ -1452,7 +1452,7 @@ foreach P = ["athlon-4", "athlon-xp", "athlon-mp"] in {
foreach P = ["k8", "opteron", "athlon64", "athlon-fx"] in {
def : Proc<P, [FeatureX87, FeatureCMPXCHG8B, FeatureSSE2, Feature3DNowA,
- FeatureFXSR, FeatureNOPL, Feature64Bit, FeatureCMOV],
+ FeatureFXSR, FeatureNOPL, FeatureX86_64, FeatureCMOV],
[TuningFastScalarShiftMasks, TuningSlowSHLD, TuningSlowUAMem16,
TuningSBBDepBreaking, TuningInsertVZEROUPPER]>;
}
@@ -1460,7 +1460,7 @@ foreach P = ["k8", "opteron", "athlon64", "athlon-fx"] in {
foreach P = ["k8-sse3", "opteron-sse3", "athlon64-sse3"] in {
def : Proc<P, [FeatureX87, FeatureCMPXCHG8B, FeatureSSE3, Feature3DNowA,
FeatureFXSR, FeatureNOPL, FeatureCMPXCHG16B, FeatureCMOV,
- Feature64Bit],
+ FeatureX86_64],
[TuningFastScalarShiftMasks, TuningSlowSHLD, TuningSlowUAMem16,
TuningSBBDepBreaking, TuningInsertVZEROUPPER]>;
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 80014359e82e8..7f169d8bb83d2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -170,7 +170,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// 32 bits so the AtomicExpandPass will expand it so we don't need cmpxchg8b.
// FIXME: Should we be limiting the atomic size on other configs? Default is
// 1024.
- if (!Subtarget.hasCmpxchg8b())
+ if (!Subtarget.hasCMPXCHG8B())
setMaxAtomicSizeInBitsSupported(32);
// Set up the register classes.
@@ -516,7 +516,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (!Subtarget.is64Bit())
setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
- if (Subtarget.hasCmpxchg16b()) {
+ if (Subtarget.hasCMPXCHG16B()) {
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
}
@@ -30355,9 +30355,9 @@ bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
unsigned OpWidth = MemType->getPrimitiveSizeInBits();
if (OpWidth == 64)
- return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
+ return Subtarget.hasCMPXCHG8B() && !Subtarget.is64Bit();
if (OpWidth == 128)
- return Subtarget.hasCmpxchg16b();
+ return Subtarget.hasCMPXCHG16B();
return false;
}
@@ -32600,7 +32600,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
EVT T = N->getValueType(0);
assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
bool Regs64bit = T == MVT::i128;
- assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
+ assert((!Regs64bit || Subtarget.hasCMPXCHG16B()) &&
"64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
SDValue cpInL, cpInH;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 02e10988f57fe..f4f185fafe899 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -981,8 +981,8 @@ def HasWBNOINVD : Predicate<"Subtarget->hasWBNOINVD()">;
def HasRDPID : Predicate<"Subtarget->hasRDPID()">;
def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">;
def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">;
-def HasCmpxchg8b : Predicate<"Subtarget->hasCmpxchg8b()">;
-def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
+def HasCmpxchg8b : Predicate<"Subtarget->hasCMPXCHG8B()">;
+def HasCmpxchg16b: Predicate<"Subtarget->hasCMPXCHG16B()">;
def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">;
def HasENQCMD : Predicate<"Subtarget->hasENQCMD()">;
def HasKL : Predicate<"Subtarget->hasKL()">;
@@ -996,17 +996,17 @@ def HasAMXINT8 : Predicate<"Subtarget->hasAMXINT8()">;
def HasUINTR : Predicate<"Subtarget->hasUINTR()">;
def HasCRC32 : Predicate<"Subtarget->hasCRC32()">;
def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
- AssemblerPredicate<(all_of (not Mode64Bit)), "Not 64-bit mode">;
+ AssemblerPredicate<(all_of (not Is64Bit)), "Not 64-bit mode">;
def In64BitMode : Predicate<"Subtarget->is64Bit()">,
- AssemblerPredicate<(all_of Mode64Bit), "64-bit mode">;
+ AssemblerPredicate<(all_of Is64Bit), "64-bit mode">;
def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
def In16BitMode : Predicate<"Subtarget->is16Bit()">,
- AssemblerPredicate<(all_of Mode16Bit), "16-bit mode">;
+ AssemblerPredicate<(all_of Is16Bit), "16-bit mode">;
def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
- AssemblerPredicate<(all_of (not Mode16Bit)), "Not 16-bit mode">;
+ AssemblerPredicate<(all_of (not Is16Bit)), "Not 16-bit mode">;
def In32BitMode : Predicate<"Subtarget->is32Bit()">,
- AssemblerPredicate<(all_of Mode32Bit), "32-bit mode">;
+ AssemblerPredicate<(all_of Is32Bit), "32-bit mode">;
def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index d5f2597164df0..30574e9048282 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -76,7 +76,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
bool HasX87 = false;
/// True if the processor supports CMPXCHG8B.
- bool HasCmpxchg8b = false;
+ bool HasCMPXCHG8B = false;
/// True if this processor has NOPL instruction
/// (generally pentium pro+).
@@ -84,7 +84,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
/// True if this processor has conditional move instructions
/// (generally pentium pro+).
- bool HasCMov = false;
+ bool HasCMOV = false;
/// True if the processor supports X86-64 instructions.
bool HasX86_64 = false;
@@ -227,7 +227,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
/// True if this processor has the CMPXCHG16B instruction;
/// this is true for most x86-64 chips, but not the first AMD chips.
- bool HasCmpxchg16b = false;
+ bool HasCMPXCHG16B = false;
/// True if the LEA instruction should be used for adjusting
/// the stack pointer. This is an optimization for Intel Atom processors.
@@ -632,11 +632,11 @@ class X86Subtarget final : public X86GenSubtargetInfo {
void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
bool hasX87() const { return HasX87; }
- bool hasCmpxchg8b() const { return HasCmpxchg8b; }
+ bool hasCMPXCHG8B() const { return HasCMPXCHG8B; }
bool hasNOPL() const { return HasNOPL; }
// SSE codegen depends on cmovs, and all SSE1+ processors support them.
// All 64-bit processors support cmov.
- bool hasCMov() const { return HasCMov || X86SSELevel >= SSE1 || is64Bit(); }
+ bool hasCMov() const { return HasCMOV || X86SSELevel >= SSE1 || is64Bit(); }
bool hasSSE1() const { return X86SSELevel >= SSE1; }
bool hasSSE2() const { return X86SSELevel >= SSE2; }
bool hasSSE3() const { return X86SSELevel >= SSE3; }
@@ -712,7 +712,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
bool isUnalignedMem16Slow() const { return IsUnalignedMem16Slow; }
bool isUnalignedMem32Slow() const { return IsUnalignedMem32Slow; }
bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
- bool hasCmpxchg16b() const { return HasCmpxchg16b && is64Bit(); }
+ bool hasCMPXCHG16B() const { return HasCMPXCHG16B && is64Bit(); }
bool useLeaForSP() const { return UseLeaForSP; }
bool hasPOPCNTFalseDeps() const { return HasPOPCNTFalseDeps; }
bool hasLZCNTFalseDeps() const { return HasLZCNTFalseDeps; }
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
index 69715072426f1..88592d0b499e9 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -38,12 +38,12 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
const FeatureBitset InlineFeatureIgnoreList = {
// This indicates the CPU is 64 bit capable not that we are in 64-bit
// mode.
- X86::Feature64Bit,
+ X86::FeatureX86_64,
// These features don't have any intrinsics or ABI effect.
X86::FeatureNOPL,
X86::FeatureCMPXCHG16B,
- X86::FeatureLAHFSAHF,
+ X86::FeatureLAHFSAHF64,
// Some older targets can be setup to fold unaligned loads.
X86::FeatureSSEUnalignedMem,