1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-31 19:38:12 +00:00

Kernel: Send SIGBUS to threads that access beyond the valid Inode mmapped range

According to Dr. POSIX, we should allow calling mmap on inodes even on
ranges that currently don't map to any actual data. Trying to read or
write to those ranges should result in SIGBUS being sent to the thread
that performed the violating memory access.
This commit is contained in:
Liav A 2022-08-07 22:08:52 +03:00 committed by Idan Horowitz
parent 3ad0e1a1d5
commit 0c675192c9
4 changed files with 57 additions and 33 deletions

View file

@ -362,7 +362,7 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
}
if (vmobject().is_inode()) {
dbgln_if(PAGE_FAULT_DEBUG, "NP(inode) fault in Region({})[{}]", this, page_index_in_region);
return handle_inode_fault(page_index_in_region);
return handle_inode_fault(page_index_in_region, offset_in_page_from_address(fault.vaddr()));
}
SpinlockLocker vmobject_locker(vmobject().m_lock);
@ -462,7 +462,7 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
return response;
}
PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region, size_t offset_in_page_in_region)
{
VERIFY(vmobject().is_inode());
VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
@ -475,6 +475,13 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
// NOTE: The VMObject lock is required when manipulating the VMObject's physical page slot.
SpinlockLocker locker(inode_vmobject.m_lock);
if (inode_vmobject.inode().size() == 0)
return PageFaultResponse::BusError;
auto fault_vaddr = vaddr_from_page_index(page_index_in_vmobject).offset(offset_in_page_in_region);
auto inode_last_valid_address = vaddr().offset(inode_vmobject.inode().size());
if (inode_last_valid_address < fault_vaddr)
return PageFaultResponse::BusError;
if (!vmobject_physical_page_slot.is_null()) {
dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping.");
if (!remap_vmobject_page(page_index_in_vmobject, *vmobject_physical_page_slot))