[llvm] Optimize loops in MC. (PR #98114)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 8 22:52:04 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-mc
Author: Dmitriy Chestnykh (chestnykh)
Changes:
The LLVM Coding Standards (https://llvm.org/docs/CodingStandards.html) tell us to avoid re-evaluating `.end()` on every loop iteration where possible, i.e. when the loop body does not modify the container.
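For reference, the idiom from the Coding Standards caches the end iterator in a local variable so it is computed only once. A minimal standalone sketch using a plain `std::vector` (not the actual LLVM containers touched by this patch):

```cpp
#include <iostream>
#include <vector>

int main() {
  std::vector<int> Values = {1, 2, 3, 4};

  // Evaluate end() once, instead of calling Values.end() on every iteration.
  // This is only safe because the loop body does not modify Values.
  for (auto I = Values.begin(), E = Values.end(); I != E; ++I)
    std::cout << *I << '\n';

  return 0;
}
```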
---
Full diff: https://github.com/llvm/llvm-project/pull/98114.diff
3 Files Affected:
- (modified) llvm/lib/MC/MCParser/AsmParser.cpp (+3-3)
- (modified) llvm/lib/MC/MCParser/MasmParser.cpp (+3-3)
- (modified) llvm/lib/MC/XCOFFObjectWriter.cpp (+14-11)
``````````diff
diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp
index f3caa90eedfb1..dba56b15424d5 100644
--- a/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -6116,8 +6116,8 @@ bool AsmParser::parseMSInlineAsm(
const char *AsmStart = ASMString.begin();
const char *AsmEnd = ASMString.end();
array_pod_sort(AsmStrRewrites.begin(), AsmStrRewrites.end(), rewritesSort);
- for (auto it = AsmStrRewrites.begin(); it != AsmStrRewrites.end(); ++it) {
- const AsmRewrite &AR = *it;
+ for (auto I = AsmStrRewrites.begin(), E = AsmStrRewrites.end(); I != E; ++I) {
+ const AsmRewrite &AR = *I;
// Check if this has already been covered by another rewrite...
if (AR.Done)
continue;
@@ -6160,7 +6160,7 @@ bool AsmParser::parseMSInlineAsm(
SMLoc OffsetLoc = SMLoc::getFromPointer(AR.IntelExp.OffsetName.data());
size_t OffsetLen = OffsetName.size();
auto rewrite_it = std::find_if(
- it, AsmStrRewrites.end(), [&](const AsmRewrite &FusingAR) {
+ I, AsmStrRewrites.end(), [&](const AsmRewrite &FusingAR) {
return FusingAR.Loc == OffsetLoc && FusingAR.Len == OffsetLen &&
(FusingAR.Kind == AOK_Input ||
FusingAR.Kind == AOK_CallInput);
diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp
index 653cc64b4c36a..f64b7f62d61d0 100644
--- a/llvm/lib/MC/MCParser/MasmParser.cpp
+++ b/llvm/lib/MC/MCParser/MasmParser.cpp
@@ -7474,8 +7474,8 @@ bool MasmParser::parseMSInlineAsm(
const char *AsmStart = ASMString.begin();
const char *AsmEnd = ASMString.end();
array_pod_sort(AsmStrRewrites.begin(), AsmStrRewrites.end(), rewritesSort);
- for (auto it = AsmStrRewrites.begin(); it != AsmStrRewrites.end(); ++it) {
- const AsmRewrite &AR = *it;
+ for (auto I = AsmStrRewrites.begin(), E = AsmStrRewrites.end(); I != E; ++I) {
+ const AsmRewrite &AR = *I;
// Check if this has already been covered by another rewrite...
if (AR.Done)
continue;
@@ -7518,7 +7518,7 @@ bool MasmParser::parseMSInlineAsm(
SMLoc OffsetLoc = SMLoc::getFromPointer(AR.IntelExp.OffsetName.data());
size_t OffsetLen = OffsetName.size();
auto rewrite_it = std::find_if(
- it, AsmStrRewrites.end(), [&](const AsmRewrite &FusingAR) {
+ I, AsmStrRewrites.end(), [&](const AsmRewrite &FusingAR) {
return FusingAR.Loc == OffsetLoc && FusingAR.Len == OffsetLen &&
(FusingAR.Kind == AOK_Input ||
FusingAR.Kind == AOK_CallInput);
diff --git a/llvm/lib/MC/XCOFFObjectWriter.cpp b/llvm/lib/MC/XCOFFObjectWriter.cpp
index 03a4ee831d895..122f19367ae7b 100644
--- a/llvm/lib/MC/XCOFFObjectWriter.cpp
+++ b/llvm/lib/MC/XCOFFObjectWriter.cpp
@@ -1385,11 +1385,12 @@ void XCOFFObjectWriter::addExceptionEntry(
unsigned XCOFFObjectWriter::getExceptionSectionSize() {
unsigned EntryNum = 0;
- for (auto it = ExceptionSection.ExceptionTable.begin();
- it != ExceptionSection.ExceptionTable.end(); ++it)
+ for (auto I = ExceptionSection.ExceptionTable.begin(),
+ E = ExceptionSection.ExceptionTable.end();
+ I != E; ++I)
// The size() gets +1 to account for the initial entry containing the
// symbol table index.
- EntryNum += it->second.Entries.size() + 1;
+ EntryNum += I->second.Entries.size() + 1;
return EntryNum * (is64Bit() ? XCOFF::ExceptionSectionEntrySize64
: XCOFF::ExceptionSectionEntrySize32);
@@ -1397,11 +1398,12 @@ unsigned XCOFFObjectWriter::getExceptionSectionSize() {
unsigned XCOFFObjectWriter::getExceptionOffset(const MCSymbol *Symbol) {
unsigned EntryNum = 0;
- for (auto it = ExceptionSection.ExceptionTable.begin();
- it != ExceptionSection.ExceptionTable.end(); ++it) {
- if (Symbol == it->second.FunctionSymbol)
+ for (auto I = ExceptionSection.ExceptionTable.begin(),
+ E = ExceptionSection.ExceptionTable.end();
+ I != E; ++I) {
+ if (Symbol == I->second.FunctionSymbol)
break;
- EntryNum += it->second.Entries.size() + 1;
+ EntryNum += I->second.Entries.size() + 1;
}
return EntryNum * (is64Bit() ? XCOFF::ExceptionSectionEntrySize64
: XCOFF::ExceptionSectionEntrySize32);
@@ -1667,17 +1669,18 @@ void XCOFFObjectWriter::writeSectionForDwarfSectionEntry(
void XCOFFObjectWriter::writeSectionForExceptionSectionEntry(
const MCAssembler &Asm, ExceptionSectionEntry &ExceptionEntry,
uint64_t &CurrentAddressLocation) {
- for (auto it = ExceptionEntry.ExceptionTable.begin();
- it != ExceptionEntry.ExceptionTable.end(); it++) {
+ for (auto I = ExceptionEntry.ExceptionTable.begin(),
+ E = ExceptionEntry.ExceptionTable.end();
+ I != E; ++I) {
// For every symbol that has exception entries, you must start the entries
// with an initial symbol table index entry
- W.write<uint32_t>(SymbolIndexMap[it->second.FunctionSymbol]);
+ W.write<uint32_t>(SymbolIndexMap[I->second.FunctionSymbol]);
if (is64Bit()) {
// 4-byte padding on 64-bit.
W.OS.write_zeros(4);
}
W.OS.write_zeros(2);
- for (auto &TrapEntry : it->second.Entries) {
+ for (auto &TrapEntry : I->second.Entries) {
writeWord(TrapEntry.TrapAddress);
W.write<uint8_t>(TrapEntry.Lang);
W.write<uint8_t>(TrapEntry.Reason);
``````````
https://github.com/llvm/llvm-project/pull/98114
More information about the llvm-commits mailing list