Kernel: Replace bespoke & rickety NVMeIO synchronization mechanism
Instead of assuming data races won't occur and trying to verify that with manual, non-atomic tracking, we can simply use a recursive spinlock rather than a normal one to resolve the original deadlock.
commit 38dad2e27f
parent a957907f4b

2 changed files with 5 additions and 18 deletions
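The deadlock being resolved: complete_current_request() holds m_request_lock while it completes a request, and completing a request can run a callback that submits a follow-up request, which acquires the same lock on the same CPU. The old code dodged this by unlocking early and policing re-use with a manual `used` flag. The following stand-alone sketch (not SerenityOS code) shows the pattern; std::recursive_mutex stands in for the kernel's RecursiveSpinlock, and submit() and its handlers are hypothetical.

#include <cstdio>
#include <functional>
#include <mutex>

// Stand-in for the RecursiveSpinlock-protected m_request_lock.
static std::recursive_mutex request_lock;

// Hypothetical submit path: inserts a request under the lock, then runs the
// completion handler while the lock is still held, mirroring how
// complete_current_request() now completes requests under m_request_lock.
static void submit(std::function<void()> const& on_complete)
{
    std::scoped_lock locker(request_lock);
    std::puts("request inserted under request_lock");
    on_complete();
}

int main()
{
    submit([] {
        // This handler submits a follow-up request while the thread already
        // holds request_lock. A plain (non-recursive) lock would deadlock on
        // the second acquisition; a recursive one just increments the
        // owner's hold count and proceeds.
        submit([] { std::puts("follow-up request completed"); });
    });
}

Once re-entry is safe, the manual occupancy tracking becomes redundant, which is why the diff below drops the `used` flag and its VERIFY_NOT_REACHED() checks.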
Kernel/Storage/NVMe/NVMeQueue.cpp

@@ -111,11 +111,7 @@ void NVMeQueue::complete_current_request(u16 cmdid, u16 status)
         auto current_request = request_pdu.request;
         AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
 
-        ScopeGuard guard = [req_result, status, &request_pdu, &lock] {
-            // FIXME: We should unlock at the end of this function to make sure no new requests is inserted
-            // before we complete the request and calling end_io_handler but that results in a deadlock
-            // For now this is avoided by asserting the `used` field while inserting.
-            lock.unlock();
+        ScopeGuard guard = [req_result, status, &request_pdu] {
             if (request_pdu.request)
                 request_pdu.request->complete(req_result);
             if (request_pdu.end_io_handler)
@@ -150,10 +146,7 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
 
     {
         SpinlockLocker req_lock(m_request_lock);
-
-        if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
-            VERIFY_NOT_REACHED();
-        m_requests.set(sub.cmdid, { nullptr, true, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
+        m_requests.set(sub.cmdid, { nullptr, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
     }
     submit_sqe(sub);
 
@@ -176,9 +169,7 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
 
     {
         SpinlockLocker req_lock(m_request_lock);
-        if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
-            VERIFY_NOT_REACHED();
-        m_requests.set(sub.cmdid, { request, true, nullptr });
+        m_requests.set(sub.cmdid, { request, nullptr });
     }
 
     full_memory_barrier();
@@ -199,9 +190,7 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
 
     {
         SpinlockLocker req_lock(m_request_lock);
-        if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
-            VERIFY_NOT_REACHED();
-        m_requests.set(sub.cmdid, { request, true, nullptr });
+        m_requests.set(sub.cmdid, { request, nullptr });
     }
 
     if (auto result = request.read_from_buffer(request.buffer(), m_rw_dma_region->vaddr().as_ptr(), request.buffer_size()); result.is_error()) {
Kernel/Storage/NVMe/NVMeQueue.h

@@ -42,12 +42,10 @@ class AsyncBlockDeviceRequest;
 struct NVMeIO {
     void clear()
     {
-        used = false;
         request = nullptr;
         end_io_handler = nullptr;
     }
     RefPtr<AsyncBlockDeviceRequest> request;
-    bool used = false;
     Function<void(u16 status)> end_io_handler;
 };
 
@@ -117,7 +115,7 @@ protected:
     Spinlock<LockRank::Interrupts> m_cq_lock {};
     HashMap<u16, NVMeIO> m_requests;
     NonnullOwnPtr<Memory::Region> m_rw_dma_region;
-    Spinlock<LockRank::None> m_request_lock {};
+    RecursiveSpinlock<LockRank::None> m_request_lock {};
 
 private:
     u16 m_qid {};