[Lldb-commits] [lldb] Centralize the code that figures out which memory ranges to save into core files (PR #71772)
Alex Langford via lldb-commits
lldb-commits at lists.llvm.org
Thu Nov 9 11:10:50 PST 2023
================
@@ -6252,3 +6243,188 @@ Status Process::WriteMemoryTags(lldb::addr_t addr, size_t len,
return DoWriteMemoryTags(addr, len, tag_manager->GetAllocationTagType(),
*packed_tags);
}
+
+// Create a CoreFileMemoryRange from a MemoryRegionInfo by converting the
+// region's base/size pair into an llvm::AddressRange and pairing it with the
+// region's LLDB permission bits.
+static Process::CoreFileMemoryRange
+CreateCoreFileMemoryRange(const MemoryRegionInfo &region) {
+  const addr_t addr = region.GetRange().GetRangeBase();
+  llvm::AddressRange range(addr, addr + region.GetRange().GetByteSize());
+  return {range, region.GetLLDBPermissions()};
+}
+
+// Add the dirty pages of \a region to \a ranges and return true if dirty
+// pages were added. Return false if the region has no valid dirty page list
+// or reports a page size of zero. Consecutive dirty pages are coalesced into
+// a single range before being appended.
+static bool AddDirtyPages(const MemoryRegionInfo &region,
+                          Process::CoreFileMemoryRanges &ranges) {
+  const auto &dirty_page_list = region.GetDirtyPageList();
+  if (!dirty_page_list)
+    return false;
+  const uint32_t lldb_permissions = region.GetLLDBPermissions();
+  const addr_t page_size = region.GetPageSize();
+  // A page size of zero would produce empty ranges, so treat it as invalid
+  // dirty page information.
+  if (page_size == 0)
+    return false;
+  llvm::AddressRange range(0, 0);
+  for (addr_t page_addr : *dirty_page_list) {
+    if (range.empty()) {
+      // No range yet, initialize the range with the current dirty page.
+      range = llvm::AddressRange(page_addr, page_addr + page_size);
+    } else {
+      if (range.end() == page_addr) {
+        // Combine consecutive ranges.
+        range = llvm::AddressRange(range.start(), page_addr + page_size);
+      } else {
+        // Add previous contiguous range and init the new range with the
+        // current dirty page.
+        ranges.push_back({range, lldb_permissions});
+        range = llvm::AddressRange(page_addr, page_addr + page_size);
+      }
+    }
+  }
+  // Add the last accumulated range, if any.
+  if (!range.empty())
+    ranges.push_back({range, lldb_permissions});
+  return true;
+}
+
+// Given a region, add the region to \a ranges.
+//
+// Only add the region if it isn't empty and if it has some permissions.
+// If \a try_dirty_pages is true, then try to add only the dirty pages for a
+// given region. If the region has dirty page information, only dirty pages
+// will be added to \a ranges, else the entire range will be added to \a
+// ranges.
+static void AddRegion(const MemoryRegionInfo &region, bool try_dirty_pages,
+                      Process::CoreFileMemoryRanges &ranges) {
+  // Don't add empty ranges or ranges with no permissions.
+  if (region.GetRange().GetByteSize() == 0 || region.GetLLDBPermissions() == 0)
+    return;
+  // If dirty page info was available and used, the whole region is skipped.
+  if (try_dirty_pages && AddDirtyPages(region, ranges))
+    return;
+  ranges.push_back(CreateCoreFileMemoryRange(region));
+}
+
+// Save all memory regions that are not empty or have at least some
+// permissions for a full core file style. Always succeeds.
+static Status
+GetCoreFileSaveRangesFull(Process &process,
+                          const MemoryRegionInfos &regions,
+                          Process::CoreFileMemoryRanges &ranges) {
+
+  // Don't add only dirty pages, add full regions.
+  const bool try_dirty_pages = false;
+  for (const auto &region : regions)
+    AddRegion(region, try_dirty_pages, ranges);
+  return Status();
+}
+
+// Save only the dirty pages to the core file. Make sure the process has at
+// least some dirty pages, as some OS versions don't support reporting what
+// pages are dirty within a memory region. If no memory regions have dirty
+// page information, return an error.
+static Status
+GetCoreFileSaveRangesDirtyOnly(Process &process,
+                               const MemoryRegionInfos &regions,
+                               Process::CoreFileMemoryRanges &ranges) {
+  // Iterate over the regions and find all dirty pages.
+  bool have_dirty_page_info = false;
+  for (const auto &region : regions) {
+    if (AddDirtyPages(region, ranges))
+      have_dirty_page_info = true;
+  }
+
+  if (!have_dirty_page_info)
+    return Status("no process memory regions have dirty page information");
+
+  return Status();
+}
+
+// Save all thread stacks to the core file. Some OS versions support reporting
+// when a memory region is stack related. We check on this information, but we
+// also use the stack pointers of each thread and add those in case the OS
+// doesn't support reporting stack memory. This function does unique the stack
+// regions and won't add the same range twice. This function also attempts to
----------------
bulbazord wrote:
I think you can drop the part about uniquing stack regions and just say that it won't add the same range twice.
https://github.com/llvm/llvm-project/pull/71772
More information about the lldb-commits
mailing list