Kernel: Switch to SpinlockProtected to protect NVMeQueue's requests map
This helps ensure no one accidentally accesses m_requests without first locking its spinlock. In fact, this change fixed such a case: process_cq() implicitly assumed that the caller had locked the lock, which was not the case for NVMePollQueue::submit_sqe().
This commit is contained in:
parent 263127f21a
commit 45aee20ea9

3 changed files with 61 additions and 60 deletions
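To illustrate the pattern the diff below applies, here is a minimal before/after sketch. It is not part of the commit: the member names are taken from the diff, while the example_before()/example_after() helpers are hypothetical.

// Before: the map and its lock are separate members, so code that
// forgets to take the lock still compiles. That is exactly the bug in
// NVMePollQueue::submit_sqe(), which reached process_cq() unlocked.
HashMap<u16, NVMeIO> m_requests;
RecursiveSpinlock<LockRank::None> m_request_lock {};

void example_before(u16 cmdid)
{
    SpinlockLocker lock(m_request_lock); // nothing enforces this line
    m_requests.remove(cmdid);
}

// After: SpinlockProtected bundles the lock with the data. The only way
// to reach the map is with(), which holds the spinlock for the duration
// of the callback.
SpinlockProtected<HashMap<u16, NVMeIO>, LockRank::None> m_requests;

void example_after(u16 cmdid)
{
    m_requests.with([cmdid](auto& requests) {
        requests.remove(cmdid); // the lock is held for this whole callback
    });
}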
NVMeInterruptQueue.cpp
@@ -31,7 +31,6 @@ void NVMeInterruptQueue::initialize_interrupt_queue()
 
 bool NVMeInterruptQueue::handle_irq(RegisterState const&)
 {
-    SpinlockLocker lock(m_request_lock);
     return process_cq() ? true : false;
 }
 
@@ -47,14 +46,15 @@ void NVMeInterruptQueue::complete_current_request(u16 cmdid, u16 status)
     });
 
     if (work_item_creation_result.is_error()) {
-        SpinlockLocker lock(m_request_lock);
-        auto& request_pdu = m_requests.get(cmdid).release_value();
+        m_requests.with([cmdid, status](auto& requests) {
+            auto& request_pdu = requests.get(cmdid).release_value();
         auto current_request = request_pdu.request;
 
         current_request->complete(AsyncDeviceRequest::OutOfMemory);
         if (request_pdu.end_io_handler)
             request_pdu.end_io_handler(status);
         request_pdu.clear();
+        });
     }
 }
 }
NVMeQueue.cpp
@@ -41,7 +41,9 @@ UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_regio
     , m_rw_dma_page(rw_dma_page)
 
 {
-    m_requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
+    m_requests.with([q_depth](auto& requests) {
+        requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
+    });
     m_sqe_array = { reinterpret_cast<NVMeSubmission*>(m_sq_dma_region->vaddr().as_ptr()), m_qdepth };
     m_cqe_array = { reinterpret_cast<NVMeCompletion*>(m_cq_dma_region->vaddr().as_ptr()), m_qdepth };
 }
@@ -66,6 +68,7 @@ void NVMeQueue::update_cqe_head()
 u32 NVMeQueue::process_cq()
 {
     u32 nr_of_processed_cqes = 0;
+    m_requests.with([this, &nr_of_processed_cqes](auto& requests) {
     while (cqe_available()) {
         u16 status;
         u16 cmdid;
@@ -74,13 +77,14 @@ u32 NVMeQueue::process_cq()
         cmdid = m_cqe_array[m_cq_head].command_id;
         dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);
 
-        if (!m_requests.contains(cmdid)) {
+        if (!requests.contains(cmdid)) {
             dmesgln("Bogus cmd id: {}", cmdid);
             VERIFY_NOT_REACHED();
         }
         complete_current_request(cmdid, status);
         update_cqe_head();
     }
+    });
     if (nr_of_processed_cqes) {
         update_cq_doorbell();
     }
@@ -106,8 +110,8 @@ void NVMeQueue::submit_sqe(NVMeSubmission& sub)
 
 void NVMeQueue::complete_current_request(u16 cmdid, u16 status)
 {
-    SpinlockLocker lock(m_request_lock);
-    auto& request_pdu = m_requests.get(cmdid).release_value();
+    m_requests.with([this, cmdid, status](auto& requests) {
+        auto& request_pdu = requests.get(cmdid).release_value();
     auto current_request = request_pdu.request;
     AsyncDeviceRequest::RequestResult req_result = AsyncDeviceRequest::Success;
 
@@ -135,6 +139,7 @@ void NVMeQueue::complete_current_request(u16 cmdid, u16 status)
             return;
         }
     }
+    });
 }
 
 u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
@@ -144,10 +149,9 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
     u16 cid = get_request_cid();
     sub.cmdid = cid;
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { nullptr, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
-    }
+    m_requests.with([this, &sub, &cmd_status](auto& requests) {
+        requests.set(sub.cmdid, { nullptr, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
+    });
     submit_sqe(sub);
 
     // FIXME: Only sync submissions (usually used for admin commands) use a WaitQueue based IO. Eventually we need to
@@ -167,10 +171,9 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
     sub.cmdid = get_request_cid();
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { request, nullptr });
-    }
+    m_requests.with([&sub, &request](auto& requests) {
+        requests.set(sub.cmdid, { request, nullptr });
+    });
 
     full_memory_barrier();
     submit_sqe(sub);
@@ -188,10 +191,9 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
     sub.cmdid = get_request_cid();
 
-    {
-        SpinlockLocker req_lock(m_request_lock);
-        m_requests.set(sub.cmdid, { request, nullptr });
-    }
+    m_requests.with([&sub, &request](auto& requests) {
+        requests.set(sub.cmdid, { request, nullptr });
+    });
 
     if (auto result = request.read_from_buffer(request.buffer(), m_rw_dma_region->vaddr().as_ptr(), request.buffer_size()); result.is_error()) {
         complete_current_request(sub.cmdid, AsyncDeviceRequest::MemoryFault);
NVMeQueue.h
@@ -113,9 +113,8 @@ private:
 
 protected:
     Spinlock<LockRank::Interrupts> m_cq_lock {};
-    HashMap<u16, NVMeIO> m_requests;
+    SpinlockProtected<HashMap<u16, NVMeIO>, LockRank::None> m_requests;
     NonnullOwnPtr<Memory::Region> m_rw_dma_region;
-    RecursiveSpinlock<LockRank::None> m_request_lock {};
 
 private:
     u16 m_qid {};
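For context, the guarantee SpinlockProtected provides can be sketched roughly as follows. This is a simplified stand-in, not the kernel's actual implementation; it assumes a Spinlock/SpinlockLocker pair like the ones used elsewhere in the diff.

template<typename T, LockRank Rank>
class SpinlockProtected {
public:
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        // Take the spinlock, run the callback on the protected value,
        // and release the lock when the locker goes out of scope.
        SpinlockLocker locker(m_spinlock);
        return callback(m_value);
    }

private:
    T m_value {};
    Spinlock<Rank> m_spinlock {};
};

Because m_value is private and nothing hands out an unlocked reference, every caller is forced through with(), which makes the "forgot to lock m_request_lock" class of bug unrepresentable.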