1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 14:07:46 +00:00

Kernel: Don't wrap AddressSpace's RegionTree in SpinlockProtected

Now that AddressSpace itself is always SpinlockProtected, we don't
need to also wrap the RegionTree. Whoever has the AddressSpace locked
is free to poke around its tree.
This commit is contained in:
Andreas Kling 2022-08-23 20:30:12 +02:00
parent d3e8eb5918
commit da24a937f5
6 changed files with 212 additions and 265 deletions

View file

@@ -47,18 +47,16 @@ Coredump::Coredump(NonnullLockRefPtr<Process> process, NonnullLockRefPtr<OpenFil
{
m_num_program_headers = 0;
m_process->address_space().with([&](auto& space) {
space->region_tree().with([&](auto& region_tree) {
for (auto& region : region_tree.regions()) {
for (auto& region : space->region_tree().regions()) {
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
if (looks_like_userspace_heap_region(region))
continue;
if (looks_like_userspace_heap_region(region))
continue;
#endif
if (region.access() == Memory::Region::Access::None)
continue;
++m_num_program_headers;
}
});
if (region.access() == Memory::Region::Access::None)
continue;
++m_num_program_headers;
}
});
++m_num_program_headers; // +1 for NOTE segment
}
@@ -138,39 +136,37 @@ ErrorOr<void> Coredump::write_program_headers(size_t notes_size)
{
size_t offset = sizeof(ElfW(Ehdr)) + m_num_program_headers * sizeof(ElfW(Phdr));
m_process->address_space().with([&](auto& space) {
space->region_tree().with([&](auto& region_tree) {
for (auto& region : region_tree.regions()) {
for (auto& region : space->region_tree().regions()) {
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
if (looks_like_userspace_heap_region(region))
continue;
if (looks_like_userspace_heap_region(region))
continue;
#endif
if (region.access() == Memory::Region::Access::None)
continue;
if (region.access() == Memory::Region::Access::None)
continue;
ElfW(Phdr) phdr {};
ElfW(Phdr) phdr {};
phdr.p_type = PT_LOAD;
phdr.p_offset = offset;
phdr.p_vaddr = region.vaddr().get();
phdr.p_paddr = 0;
phdr.p_type = PT_LOAD;
phdr.p_offset = offset;
phdr.p_vaddr = region.vaddr().get();
phdr.p_paddr = 0;
phdr.p_filesz = region.page_count() * PAGE_SIZE;
phdr.p_memsz = region.page_count() * PAGE_SIZE;
phdr.p_align = 0;
phdr.p_filesz = region.page_count() * PAGE_SIZE;
phdr.p_memsz = region.page_count() * PAGE_SIZE;
phdr.p_align = 0;
phdr.p_flags = region.is_readable() ? PF_R : 0;
if (region.is_writable())
phdr.p_flags |= PF_W;
if (region.is_executable())
phdr.p_flags |= PF_X;
phdr.p_flags = region.is_readable() ? PF_R : 0;
if (region.is_writable())
phdr.p_flags |= PF_W;
if (region.is_executable())
phdr.p_flags |= PF_X;
offset += phdr.p_filesz;
offset += phdr.p_filesz;
[[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
}
});
[[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
}
});
ElfW(Phdr) notes_pheader {};
@@ -192,39 +188,37 @@ ErrorOr<void> Coredump::write_regions()
{
u8 zero_buffer[PAGE_SIZE] = {};
return m_process->address_space().with([&](auto& space) {
return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
for (auto& region : region_tree.regions()) {
VERIFY(!region.is_kernel());
return m_process->address_space().with([&](auto& space) -> ErrorOr<void> {
for (auto& region : space->region_tree().regions()) {
VERIFY(!region.is_kernel());
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
if (looks_like_userspace_heap_region(region))
continue;
if (looks_like_userspace_heap_region(region))
continue;
#endif
if (region.access() == Memory::Region::Access::None)
continue;
if (region.access() == Memory::Region::Access::None)
continue;
// If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
if (!region.is_mapped())
continue;
// If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
if (!region.is_mapped())
continue;
region.set_readable(true);
region.remap();
region.set_readable(true);
region.remap();
for (size_t i = 0; i < region.page_count(); i++) {
auto page = region.physical_page(i);
auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
if (page)
return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
// If the current page is not backed by a physical page, we zero it in the coredump file.
return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
}();
TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
}
for (size_t i = 0; i < region.page_count(); i++) {
auto page = region.physical_page(i);
auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
if (page)
return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
// If the current page is not backed by a physical page, we zero it in the coredump file.
return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
}();
TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
}
return {};
});
}
return {};
});
}
@@ -285,36 +279,34 @@ ErrorOr<void> Coredump::create_notes_threads_data(auto& builder) const
ErrorOr<void> Coredump::create_notes_regions_data(auto& builder) const
{
size_t region_index = 0;
return m_process->address_space().with([&](auto& space) {
return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
for (auto const& region : region_tree.regions()) {
return m_process->address_space().with([&](auto& space) -> ErrorOr<void> {
for (auto const& region : space->region_tree().regions()) {
#if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
if (looks_like_userspace_heap_region(region))
continue;
if (looks_like_userspace_heap_region(region))
continue;
#endif
if (region.access() == Memory::Region::Access::None)
continue;
if (region.access() == Memory::Region::Access::None)
continue;
ELF::Core::MemoryRegionInfo info {};
info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
ELF::Core::MemoryRegionInfo info {};
info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
info.region_start = region.vaddr().get();
info.region_end = region.vaddr().offset(region.size()).get();
info.program_header_index = region_index++;
info.region_start = region.vaddr().get();
info.region_end = region.vaddr().offset(region.size()).get();
info.program_header_index = region_index++;
TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
// NOTE: The region name *is* null-terminated, so the following is ok:
auto name = region.name();
if (name.is_empty())
TRY(builder.append('\0'));
else
TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
}
return {};
});
// NOTE: The region name *is* null-terminated, so the following is ok:
auto name = region.name();
if (name.is_empty())
TRY(builder.append('\0'));
else
TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
}
return {};
});
}