
Kernel: Switch to SpinlockProtected to protect NVMeQueue's requests map

This helps ensure no one accidentally accesses m_requests without first
locking its spinlock. In fact, this change fixed such a case, since
process_cq() implicitly assumed the caller locked the lock, which was
not the case for NVMePollQueue::submit_sqe().
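For context on what SpinlockProtected buys here, below is a minimal sketch of the pattern in standard C++. std::mutex stands in for the kernel Spinlock, and LockProtected, Queue, Request, process_cq() and add_request() are illustrative stand-ins, not the actual AK/Kernel API: the point is that the protected value can only be reached through with(), so every access is forced to run under the lock, and a function like process_cq() can lock internally instead of assuming its caller did.

// Illustrative sketch only; names and types do not match the AK/Kernel classes.
#include <mutex>
#include <unordered_map>

template<typename T>
class LockProtected {
public:
    // The only way to reach the value is through with(), which holds the
    // lock for the duration of the callback, so "forgot to lock" bugs
    // become impossible by construction.
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::scoped_lock locker(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value {};
};

struct Request {
    bool completed { false };
};

class Queue {
public:
    // Mirrors the shape of the change: process_cq() takes the lock itself
    // via with(), so callers (e.g. a poll-queue submit path) no longer
    // have to remember to hold it first.
    bool process_cq()
    {
        return m_requests.with([](auto& requests) {
            for (auto& [id, request] : requests)
                request.completed = true;
            return !requests.empty();
        });
    }

    void add_request(unsigned short cmdid)
    {
        m_requests.with([cmdid](auto& requests) {
            requests.emplace(cmdid, Request {});
        });
    }

private:
    LockProtected<std::unordered_map<unsigned short, Request>> m_requests;
};

int main()
{
    Queue queue;
    queue.add_request(1);
    return queue.process_cq() ? 0 : 1;
}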
commit 45aee20ea9 (parent 263127f21a)
Author:    Idan Horowitz
Date:      2024-02-09 17:23:37 +02:00
Committed: Andreas Kling
3 changed files with 61 additions and 60 deletions


@@ -31,7 +31,6 @@ void NVMeInterruptQueue::initialize_interrupt_queue()
 bool NVMeInterruptQueue::handle_irq(RegisterState const&)
 {
-    SpinlockLocker lock(m_request_lock);
     return process_cq() ? true : false;
 }
@@ -47,14 +46,15 @@ void NVMeInterruptQueue::complete_current_request(u16 cmdid, u16 status)
     });
     if (work_item_creation_result.is_error()) {
-        SpinlockLocker lock(m_request_lock);
-        auto& request_pdu = m_requests.get(cmdid).release_value();
-        auto current_request = request_pdu.request;
+        m_requests.with([cmdid, status](auto& requests) {
+            auto& request_pdu = requests.get(cmdid).release_value();
+            auto current_request = request_pdu.request;
-        current_request->complete(AsyncDeviceRequest::OutOfMemory);
-        if (request_pdu.end_io_handler)
-            request_pdu.end_io_handler(status);
-        request_pdu.clear();
+            current_request->complete(AsyncDeviceRequest::OutOfMemory);
+            if (request_pdu.end_io_handler)
+                request_pdu.end_io_handler(status);
+            request_pdu.clear();
+        });
     }
 }
 }