Kernel: Wrap process address spaces in SpinlockProtected
This forces anyone who wants to look into and/or manipulate an address space to lock it, replacing the previous, more flimsy manual spinlock use.

Note that pointers *into* the address space are not safe to use after you unlock the space. We've got many issues like this, and we'll have to track those down as well.
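As a minimal user-space sketch of the access pattern this commit enforces (LockProtected is an illustrative stand-in for the kernel's SpinlockProtected<T>, and std::mutex stands in for the kernel spinlock; the types and names below are hypothetical, not the kernel implementation):

#include <mutex>
#include <vector>

// Illustrative stand-in for SpinlockProtected<T>: the wrapped value is only
// reachable through with(), which holds the lock for the callback's duration.
template<typename T>
class LockProtected {
public:
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard guard(m_lock); // the kernel uses a spinlock here
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value {};
};

struct Region {
    int access { 0 };
};

struct AddressSpace {
    std::vector<Region> regions;
};

int main()
{
    LockProtected<AddressSpace> address_space;

    // All inspection/manipulation happens inside with(), mirroring the
    // m_process->address_space().with([&](auto& space) { ... }) calls
    // in the diff below.
    address_space.with([&](auto& space) {
        space.regions.push_back(Region { 1 });
    });

    // As the commit message warns: pointers obtained inside with() (e.g. a
    // Region*) must not be used after the callback returns, since the lock
    // is released and the pointee may be freed or remapped.
}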
This commit is contained in:
parent
d6ef18f587
commit
cf16b2c8e6
38 changed files with 708 additions and 627 deletions
Kernel/Coredump.cpp

@@ -46,17 +46,19 @@ Coredump::Coredump(NonnullLockRefPtr<Process> process, NonnullLockRefPtr<OpenFileDescription> description)
     , m_description(move(description))
 {
     m_num_program_headers = 0;
-    m_process->address_space().region_tree().with([&](auto& region_tree) {
-        for (auto& region : region_tree.regions()) {
+    m_process->address_space().with([&](auto& space) {
+        space->region_tree().with([&](auto& region_tree) {
+            for (auto& region : region_tree.regions()) {
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-            if (looks_like_userspace_heap_region(region))
-                continue;
+                if (looks_like_userspace_heap_region(region))
+                    continue;
 #endif
 
-            if (region.access() == Memory::Region::Access::None)
-                continue;
-            ++m_num_program_headers;
-        }
+                if (region.access() == Memory::Region::Access::None)
+                    continue;
+                ++m_num_program_headers;
+            }
+        });
     });
     ++m_num_program_headers; // +1 for NOTE segment
 }
@@ -135,38 +137,40 @@ ErrorOr<void> Coredump::write_elf_header()
 ErrorOr<void> Coredump::write_program_headers(size_t notes_size)
 {
     size_t offset = sizeof(ElfW(Ehdr)) + m_num_program_headers * sizeof(ElfW(Phdr));
-    m_process->address_space().region_tree().with([&](auto& region_tree) {
-        for (auto& region : region_tree.regions()) {
+    m_process->address_space().with([&](auto& space) {
+        space->region_tree().with([&](auto& region_tree) {
+            for (auto& region : region_tree.regions()) {
 
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-            if (looks_like_userspace_heap_region(region))
-                continue;
+                if (looks_like_userspace_heap_region(region))
+                    continue;
 #endif
 
-            if (region.access() == Memory::Region::Access::None)
-                continue;
+                if (region.access() == Memory::Region::Access::None)
+                    continue;
 
-            ElfW(Phdr) phdr {};
+                ElfW(Phdr) phdr {};
 
-            phdr.p_type = PT_LOAD;
-            phdr.p_offset = offset;
-            phdr.p_vaddr = region.vaddr().get();
-            phdr.p_paddr = 0;
+                phdr.p_type = PT_LOAD;
+                phdr.p_offset = offset;
+                phdr.p_vaddr = region.vaddr().get();
+                phdr.p_paddr = 0;
 
-            phdr.p_filesz = region.page_count() * PAGE_SIZE;
-            phdr.p_memsz = region.page_count() * PAGE_SIZE;
-            phdr.p_align = 0;
+                phdr.p_filesz = region.page_count() * PAGE_SIZE;
+                phdr.p_memsz = region.page_count() * PAGE_SIZE;
+                phdr.p_align = 0;
 
-            phdr.p_flags = region.is_readable() ? PF_R : 0;
-            if (region.is_writable())
-                phdr.p_flags |= PF_W;
-            if (region.is_executable())
-                phdr.p_flags |= PF_X;
+                phdr.p_flags = region.is_readable() ? PF_R : 0;
+                if (region.is_writable())
+                    phdr.p_flags |= PF_W;
+                if (region.is_executable())
+                    phdr.p_flags |= PF_X;
 
-            offset += phdr.p_filesz;
+                offset += phdr.p_filesz;
 
-            [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
-        }
+                [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<uint8_t*>(&phdr)), sizeof(ElfW(Phdr)));
+            }
+        });
     });
 
     ElfW(Phdr) notes_pheader {};
@@ -188,37 +192,39 @@ ErrorOr<void> Coredump::write_regions()
 {
     u8 zero_buffer[PAGE_SIZE] = {};
 
-    return m_process->address_space().region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
-        for (auto& region : region_tree.regions()) {
-            VERIFY(!region.is_kernel());
+    return m_process->address_space().with([&](auto& space) {
+        return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
+            for (auto& region : region_tree.regions()) {
+                VERIFY(!region.is_kernel());
 
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-            if (looks_like_userspace_heap_region(region))
-                continue;
+                if (looks_like_userspace_heap_region(region))
+                    continue;
 #endif
 
-            if (region.access() == Memory::Region::Access::None)
-                continue;
+                if (region.access() == Memory::Region::Access::None)
+                    continue;
 
-            // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
-            if (!region.is_mapped())
-                continue;
+                // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
+                if (!region.is_mapped())
+                    continue;
 
-            region.set_readable(true);
-            region.remap();
+                region.set_readable(true);
+                region.remap();
 
-            for (size_t i = 0; i < region.page_count(); i++) {
-                auto page = region.physical_page(i);
-                auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
-                    if (page)
-                        return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
-                    // If the current page is not backed by a physical page, we zero it in the coredump file.
-                    return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
-                }();
-                TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
-            }
-        }
-        return {};
+                for (size_t i = 0; i < region.page_count(); i++) {
+                    auto page = region.physical_page(i);
+                    auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
+                        if (page)
+                            return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<uint8_t*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
+                        // If the current page is not backed by a physical page, we zero it in the coredump file.
+                        return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
+                    }();
+                    TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
+                }
+            }
+            return {};
+        });
     });
 }
 
@@ -279,34 +285,36 @@ ErrorOr<void> Coredump::create_notes_threads_data(auto& builder) const
 ErrorOr<void> Coredump::create_notes_regions_data(auto& builder) const
 {
     size_t region_index = 0;
-    return m_process->address_space().region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
-        for (auto const& region : region_tree.regions()) {
+    return m_process->address_space().with([&](auto& space) {
+        return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
+            for (auto const& region : region_tree.regions()) {
 
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-            if (looks_like_userspace_heap_region(region))
-                continue;
+                if (looks_like_userspace_heap_region(region))
+                    continue;
 #endif
 
-            if (region.access() == Memory::Region::Access::None)
-                continue;
+                if (region.access() == Memory::Region::Access::None)
+                    continue;
 
-            ELF::Core::MemoryRegionInfo info {};
-            info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
+                ELF::Core::MemoryRegionInfo info {};
+                info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
 
-            info.region_start = region.vaddr().get();
-            info.region_end = region.vaddr().offset(region.size()).get();
-            info.program_header_index = region_index++;
+                info.region_start = region.vaddr().get();
+                info.region_end = region.vaddr().offset(region.size()).get();
+                info.program_header_index = region_index++;
 
-            TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
+                TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
 
-            // NOTE: The region name *is* null-terminated, so the following is ok:
-            auto name = region.name();
-            if (name.is_empty())
-                TRY(builder.append('\0'));
-            else
-                TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
-        }
-        return {};
+                // NOTE: The region name *is* null-terminated, so the following is ok:
+                auto name = region.name();
+                if (name.is_empty())
+                    TRY(builder.append('\0'));
+                else
+                    TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
+            }
+            return {};
+        });
     });
 }
 
@@ -344,7 +352,6 @@ ErrorOr<void> Coredump::create_notes_segment_data(auto& builder) const
 
 ErrorOr<void> Coredump::write()
 {
-    SpinlockLocker lock(m_process->address_space().get_lock());
     ScopedAddressSpaceSwitcher switcher(m_process);
 
     auto builder = TRY(KBufferBuilder::try_create());