[Lldb-commits] [lldb] [llvm] [LLDB][Minidump] Add 64b support to LLDB's minidump file builder. (PR #95312)
Greg Clayton via lldb-commits
lldb-commits at lists.llvm.org
Thu Jun 13 13:55:52 PDT 2024
================
@@ -858,10 +937,225 @@ Status MinidumpFileBuilder::Dump(lldb::FileUP &core_file) const {
return error;
}
-size_t MinidumpFileBuilder::GetDirectoriesNum() const {
- return m_directories.size();
+Status MinidumpFileBuilder::AddMemoryList_32(
+ const ProcessSP &process_sp, const Process::CoreFileMemoryRanges &ranges) {
+ std::vector<MemoryDescriptor> descriptors;
+ Status error;
+ Log *log = GetLog(LLDBLog::Object);
+ size_t region_index = 0;
+ for (const auto &core_range : ranges) {
+ // Take the offset before we write.
+ const size_t offset_for_data = GetCurrentDataEndOffset();
+ const addr_t addr = core_range.range.start();
+ const addr_t size = core_range.range.size();
+ auto data_up = std::make_unique<DataBufferHeap>(size, 0);
+
+ LLDB_LOGF(
+ log,
+ "AddMemoryList %zu/%zu reading memory for region (%zu bytes) [%zx, %zx)",
+ region_index, ranges.size(), size, addr, addr + size);
+ ++region_index;
+
+ const size_t bytes_read =
+ process_sp->ReadMemory(addr, data_up->GetBytes(), size, error);
+ if (error.Fail() || bytes_read == 0) {
+ LLDB_LOGF(log, "Failed to read memory region. Bytes read: %zu, error: %s",
+ bytes_read, error.AsCString());
+ // Just skip sections with errors or zero bytes in 32b mode.
+ error.Clear();
+ continue;
+ } else if (bytes_read != size) {
+ LLDB_LOGF(log, "Memory region at: %zu failed to read %zu bytes", addr,
+ size);
+ }
+
+ MemoryDescriptor descriptor;
+ descriptor.StartOfMemoryRange =
+ static_cast<llvm::support::ulittle64_t>(addr);
+ descriptor.Memory.DataSize =
+ static_cast<llvm::support::ulittle32_t>(bytes_read);
+ descriptor.Memory.RVA =
+ static_cast<llvm::support::ulittle32_t>(offset_for_data);
+ descriptors.push_back(descriptor);
+ if (m_thread_by_range_start.count(addr) > 0) {
+ m_thread_by_range_start[addr].Stack = descriptor;
+ }
+
+ // Add the data to the buffer, flush as needed.
+ error = AddData(data_up->GetBytes(), bytes_read);
+ if (error.Fail())
+ return error;
+ }
+
+ // Add a directory that references this list. Its stream size is a 32 bit
+ // count of the ranges followed by the descriptors themselves.
+ AddDirectory(StreamType::MemoryList,
+ sizeof(llvm::support::ulittle32_t) +
+ descriptors.size() *
+ sizeof(llvm::minidump::MemoryDescriptor));
+
+ llvm::support::ulittle32_t memory_ranges_num =
+ static_cast<llvm::support::ulittle32_t>(descriptors.size());
+ m_data.AppendData(&memory_ranges_num, sizeof(llvm::support::ulittle32_t));
+ // For 32b we can get away with writing out the descriptors after the data.
+ // This means no cleanup loop is needed.
+ m_data.AppendData(descriptors.data(),
+ descriptors.size() * sizeof(MemoryDescriptor));
+
+ return error;
}
-size_t MinidumpFileBuilder::GetCurrentDataEndOffset() const {
- return sizeof(llvm::minidump::Header) + m_data.GetByteSize();
+Status MinidumpFileBuilder::AddMemoryList_64(
+ const ProcessSP &process_sp, const Process::CoreFileMemoryRanges &ranges) {
+ AddDirectory(StreamType::Memory64List,
+ (sizeof(llvm::support::ulittle64_t) * 2) +
+ ranges.size() * sizeof(llvm::minidump::MemoryDescriptor_64));
+
+ llvm::support::ulittle64_t memory_ranges_num =
+ static_cast<llvm::support::ulittle64_t>(ranges.size());
+ m_data.AppendData(&memory_ranges_num, sizeof(llvm::support::ulittle64_t));
+ llvm::support::ulittle64_t memory_ranges_base_rva =
+ static_cast<llvm::support::ulittle64_t>(GetCurrentDataEndOffset());
+ m_data.AppendData(&memory_ranges_base_rva,
+ sizeof(llvm::support::ulittle64_t));
+ // Capture the starting offset, so we can do cleanup later if needed.
+ uint64_t starting_offset = GetCurrentDataEndOffset();
+
+ bool cleanup_required = false;
+ std::vector<MemoryDescriptor_64> descriptors;
+ // Enumerate the ranges and create the memory descriptors so we can append
+ // them first
+ for (const auto &core_range : ranges) {
+ // Add the space required to store the memory descriptor
+ MemoryDescriptor_64 memory_desc;
+ memory_desc.StartOfMemoryRange =
+ static_cast<llvm::support::ulittle64_t>(core_range.range.start());
+ memory_desc.DataSize =
+ static_cast<llvm::support::ulittle64_t>(core_range.range.size());
+ descriptors.push_back(memory_desc);
+ // Now write this memory descriptor to the buffer.
+ m_data.AppendData(&memory_desc, sizeof(MemoryDescriptor_64));
+ }
+
+ Status error;
+ Log *log = GetLog(LLDBLog::Object);
+ size_t region_index = 0;
+ for (const auto &core_range : ranges) {
+ const addr_t addr = core_range.range.start();
+ const addr_t size = core_range.range.size();
+ auto data_up = std::make_unique<DataBufferHeap>(size, 0);
+
+ LLDB_LOGF(log,
+ "AddMemoryList_64 %zu/%zu reading memory for region (%zu bytes) "
+ "[%zx, %zx)",
+ region_index, ranges.size(), size, addr, addr + size);
+
+ const size_t bytes_read =
+ process_sp->ReadMemory(addr, data_up->GetBytes(), size, error);
+ if (error.Fail()) {
+ LLDB_LOGF(log, "Failed to read memory region. Bytes read: %zu, error: %s",
+ bytes_read, error.AsCString());
+ error.Clear();
+ cleanup_required = true;
+ descriptors[region_index].DataSize = 0;
+ }
+ if (bytes_read != size) {
+ LLDB_LOGF(log, "Memory region at: %zu failed to read %zu bytes", addr,
+ size);
+ cleanup_required = true;
+ descriptors[region_index].DataSize = bytes_read;
+ }
+ ++region_index;
+
+ // Add the data to the buffer, flush as needed.
+ error = AddData(data_up->GetBytes(), bytes_read);
+ if (error.Fail())
+ return error;
+ }
+
+ // Early return if there is no cleanup needed.
+ if (!cleanup_required) {
+ return error;
+ } else {
+ // Flush to disk so we can make the fixes in place.
+ error = FlushToDisk();
+ if (error.Fail())
+ return error;
+ // Fixup the descriptors that were not read correctly.
+ m_core_file->SeekFromStart(starting_offset);
+ size_t bytes_written = sizeof(MemoryDescriptor_64) * descriptors.size();
+ error = m_core_file->Write(descriptors.data(), bytes_written);
+ if (error.Fail() ||
+ bytes_written != sizeof(MemoryDescriptor_64) * descriptors.size()) {
+ error.SetErrorStringWithFormat(
+ "unable to write the memory descriptors (written %zd/%zd)",
+ bytes_written, sizeof(MemoryDescriptor_64) * descriptors.size());
+ }
+
+ return error;
+ }
+}
+
+Status MinidumpFileBuilder::AddData(const void *data, addr_t size) {
+ // This should also get chunked, because worst case we copy over a big
+ // object / memory range, say 5gb. In that case we'd have to allocate 10gb:
+ // 5gb for the buffer we're copying from, and then 5gb for the buffer we're
+ // copying to, which is short lived and immediately goes to disk. The goal
+ // here is to limit the number of bytes we need to host in memory at any
+ // given time.
+ m_data.AppendData(data, size);
+ if (m_data.GetByteSize() > m_write_chunk_max) {
+ return FlushToDisk();
+ }
+
+ return Status();
+}
+
+Status MinidumpFileBuilder::FlushToDisk() {
+ Status error;
+ // Set the stream to its end.
+ m_core_file->SeekFromEnd(0);
+ addr_t starting_size = m_data.GetByteSize();
+ addr_t remaining_bytes = starting_size;
+ size_t offset = 0;
+
+ // Unix/Linux OSes return ssize_t from IO operations, which means a max of
+ // roughly 2gb per IO operation, so we chunk the writes here. We keep each
+ // chunk small to try to minimize memory use.
+ while (remaining_bytes > 0) {
----------------
clayborg wrote:
yes, that is a good approach. As long as we know that we won't get an error if some bytes were written
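
For reference, a minimal sketch of the kind of chunked write loop being discussed, written against plain POSIX write(2) rather than the lldb File API (the WriteAll helper name and the 1gb chunk cap are made up for the illustration). A short write is not treated as an error, it just advances the offset and loops; a hard error still leaves the already-written prefix on disk, which is the case to keep in mind here:

#include <algorithm>
#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <unistd.h>

// Hypothetical helper: write `size` bytes to `fd`, chunking each call and
// retrying partial writes. Returns true on success, false on a hard error.
// Even on failure, bytes in [0, offset) are already on disk.
static bool WriteAll(int fd, const uint8_t *data, size_t size) {
  // Linux caps a single write(2) at roughly 2gb, so keep chunks below that.
  constexpr size_t kMaxChunk = size_t(1) << 30;
  size_t offset = 0;
  while (offset < size) {
    const size_t to_write = std::min(size - offset, kMaxChunk);
    const ssize_t written = ::write(fd, data + offset, to_write);
    if (written < 0) {
      if (errno == EINTR)
        continue; // interrupted before anything was written; retry
      return false; // hard error; the prefix already written stays on disk
    }
    if (written == 0)
      return false; // no forward progress; avoid spinning forever
    offset += static_cast<size_t>(written); // short write: just loop again
  }
  return true;
}

Mapping that back to FlushToDisk() would mean looping on m_core_file->Write() the same way and only surfacing an error once no forward progress can be made.
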
https://github.com/llvm/llvm-project/pull/95312