mirror of
https://github.com/RGBCube/serenity
synced 2025-07-25 17:37:37 +00:00
Kernel/NVMeQueue: Use waitqueue in submit_sync_sqe
The current way we handle sync commands is very ugly and depends on a lot of preconditions. Now that we have an end_io handler for a request, we can use WaitQueue to do sync commands more elegantly. This does depend on the block layer sending one request at a time, but this change is a step forward towards better IO handling.
This commit is contained in:
parent
0096eadf40
commit
a65b0cbe4a
2 changed files with 7 additions and 16 deletions
|
@ -105,7 +105,7 @@ void NVMeQueue::submit_sqe(NVMeSubmission& sub)
|
||||||
u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
|
u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
|
||||||
{
|
{
|
||||||
// For now let's use sq tail as a unique command id.
|
// For now let's use sq tail as a unique command id.
|
||||||
u16 cqe_cid;
|
u16 cmd_status;
|
||||||
u16 cid = get_request_cid();
|
u16 cid = get_request_cid();
|
||||||
sub.cmdid = cid;
|
sub.cmdid = cid;
|
||||||
|
|
||||||
|
@ -114,24 +114,14 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
|
||||||
|
|
||||||
if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
|
if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
|
||||||
VERIFY_NOT_REACHED();
|
VERIFY_NOT_REACHED();
|
||||||
m_requests.set(sub.cmdid, { nullptr, true, nullptr });
|
m_requests.set(sub.cmdid, { nullptr, true, [this, &cmd_status](u16 status) mutable { cmd_status = status; m_sync_wait_queue.wake_all(); } });
|
||||||
}
|
}
|
||||||
|
|
||||||
submit_sqe(sub);
|
submit_sqe(sub);
|
||||||
do {
|
|
||||||
int index;
|
|
||||||
{
|
|
||||||
SpinlockLocker lock(m_cq_lock);
|
|
||||||
index = m_cq_head - 1;
|
|
||||||
if (index < 0)
|
|
||||||
index = m_qdepth - 1;
|
|
||||||
}
|
|
||||||
cqe_cid = m_cqe_array[index].command_id;
|
|
||||||
microseconds_delay(1);
|
|
||||||
} while (cid != cqe_cid);
|
|
||||||
|
|
||||||
auto status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
|
// FIXME: Only sync submissions (usually used for admin commands) use a WaitQueue based IO. Eventually we need to
|
||||||
return status;
|
// move this logic into the block layer instead of sprinkling them in the driver code.
|
||||||
|
m_sync_wait_queue.wait_forever("NVMe sync submit"sv);
|
||||||
|
return cmd_status;
|
||||||
}
|
}
|
||||||
|
|
||||||
void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
|
void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
|
||||||
|
|
|
@ -95,6 +95,7 @@ private:
|
||||||
OwnPtr<Memory::Region> m_sq_dma_region;
|
OwnPtr<Memory::Region> m_sq_dma_region;
|
||||||
Vector<NonnullRefPtr<Memory::PhysicalPage>> m_sq_dma_page;
|
Vector<NonnullRefPtr<Memory::PhysicalPage>> m_sq_dma_page;
|
||||||
Span<NVMeCompletion> m_cqe_array;
|
Span<NVMeCompletion> m_cqe_array;
|
||||||
|
WaitQueue m_sync_wait_queue;
|
||||||
Memory::TypedMapping<DoorbellRegister volatile> m_db_regs;
|
Memory::TypedMapping<DoorbellRegister volatile> m_db_regs;
|
||||||
NonnullRefPtr<Memory::PhysicalPage const> m_rw_dma_page;
|
NonnullRefPtr<Memory::PhysicalPage const> m_rw_dma_page;
|
||||||
};
|
};
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue