1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-25 21:05:07 +00:00

Kernel/x86: Re-enable interrupts ASAP when handling page faults

As soon as we've saved CR2 (the faulting address), we can re-enable
interrupt processing. This should make the kernel more responsive under
heavy fault loads.
This commit is contained in:
Andreas Kling 2022-08-19 12:14:46 +02:00
parent 037f1ae979
commit a84d893af8
4 changed files with 5 additions and 7 deletions

View file

@@ -254,8 +254,12 @@ void page_fault_handler(TrapFrame* trap)
 {
     clac();
-    auto& regs = *trap->regs;
+    // NOTE: Once we've extracted the faulting address from CR2,
+    //       we can re-enable interrupts.
     auto fault_address = read_cr2();
+    sti();
+
+    auto& regs = *trap->regs;
     if constexpr (PAGE_FAULT_DEBUG) {
         u32 fault_page_directory = read_cr3();

View file

@@ -317,7 +317,6 @@ size_t AnonymousVMObject::cow_pages() const
 PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
 {
-    VERIFY_INTERRUPTS_DISABLED();
     SpinlockLocker lock(m_lock);
     if (is_volatile()) {

View file

@@ -715,8 +715,6 @@ Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
 PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 {
-    VERIFY_INTERRUPTS_DISABLED();
     auto faulted_in_range = [&fault](auto const* start, auto const* end) {
         return fault.vaddr() >= VirtualAddress { start } && fault.vaddr() < VirtualAddress { end };
     };

View file

@@ -402,7 +402,6 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
 PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
 {
-    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(vmobject().is_anonymous());
     SpinlockLocker locker(vmobject().m_lock);
@@ -444,7 +443,6 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
 PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
 {
-    VERIFY_INTERRUPTS_DISABLED();
     auto current_thread = Thread::current();
     if (current_thread)
         current_thread->did_cow_fault();
@@ -461,7 +459,6 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
 PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 {
-    VERIFY_INTERRUPTS_DISABLED();
     VERIFY(vmobject().is_inode());
     VERIFY(!s_mm_lock.is_locked_by_current_processor());
     VERIFY(!g_scheduler_lock.is_locked_by_current_processor());