Kernel/NVMe: Redesign the tracking of requests in an NVMe Queue
There was a private variable named m_current_request which was used to track a single request at a time. That guarantee is currently provided by the block layer, where we wait on each IO, but this design will break down in the driver once the block layer removes that constraint.

Redesign the IO handling in a completely asynchronous way by tracking requests up to the queue depth. An NVMeIO struct is introduced to track a submitted IO along with related information, such as whether the IO is still being processed and an endio callback which will be called when the request completes. A private HashMap keyed on the command id of a request stores the corresponding NVMeIO. The endio handler will come in handy when we do a sync request and want to wake up the wait queue at the end.

This change also simplifies the code by removing some special conditions in the submit_sqe function that were marked as FIXME for a long time.
parent 3fe7bda021
commit 0096eadf40

6 changed files with 103 additions and 43 deletions
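Before the diff, here is a minimal sketch of the per-command bookkeeping the commit message describes. The field names are inferred from the aggregate initializers that appear in the hunks below (e.g. { request, true, nullptr }); the exact declarations in the real header may differ.

// Sketch only: one bookkeeping entry per in-flight NVMe command.
struct NVMeIO {
    RefPtr<AsyncBlockDeviceRequest> request;   // null for admin/sync commands
    bool used { false };                       // true while the command id is in flight
    Function<void(u16 status)> end_io_handler; // optional callback invoked on completion
};

// Keyed by the command id (cmdid) placed in each submission queue entry, so up to
// queue-depth commands can be outstanding at once. Guarded by the existing m_request_lock.
HashMap<u16, NVMeIO> m_requests;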
@@ -26,8 +26,7 @@ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8
 }
 
 UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
-    : m_current_request(nullptr)
-    , m_rw_dma_region(move(rw_dma_region))
+    : m_rw_dma_region(move(rw_dma_region))
     , m_qid(qid)
     , m_admin_queue(qid == 0)
     , m_qdepth(q_depth)
@@ -39,6 +38,7 @@ UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_regio
     , m_rw_dma_page(rw_dma_page)
 
 {
+    m_requests.try_ensure_capacity(q_depth).release_value_but_fixme_should_propagate_errors();
     m_sqe_array = { reinterpret_cast<NVMeSubmission*>(m_sq_dma_region->vaddr().as_ptr()), m_qdepth };
     m_cqe_array = { reinterpret_cast<NVMeCompletion*>(m_cq_dma_region->vaddr().as_ptr()), m_qdepth };
 }
@@ -70,15 +70,12 @@ u32 NVMeQueue::process_cq()
         status = CQ_STATUS_FIELD(m_cqe_array[m_cq_head].status);
         cmdid = m_cqe_array[m_cq_head].command_id;
         dbgln_if(NVME_DEBUG, "NVMe: Completion with status {:x} and command identifier {}. CQ_HEAD: {}", status, cmdid, m_cq_head);
-        // TODO: We don't use AsyncBlockDevice requests for admin queue as it is only applicable for a block device (NVMe namespace)
-        // But admin commands precedes namespace creation. Unify requests to avoid special conditions
-        if (m_admin_queue == false) {
-            // As the block layer calls are now sync (as we wait on each requests),
-            // everything is operated on a single request similar to BMIDE driver.
-            if (m_current_request) {
-                complete_current_request(status);
-            }
+
+        if (!m_requests.contains(cmdid)) {
+            dmesgln("Bogus cmd id: {}", cmdid);
+            VERIFY_NOT_REACHED();
         }
+        complete_current_request(cmdid, status);
         update_cqe_head();
     }
     if (nr_of_processed_cqes) {
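The hunk above only shows the call site; the subclass overrides of complete_current_request() are not part of this excerpt. As an illustration of the lookup-by-cmdid flow (using the NVMeIO sketch above, not the code from this commit), the completion side could look roughly like this:

// Illustrative sketch: find the IO by its command id, run the optional endio
// callback, release the slot, then complete the block request (if any).
void NVMeQueue::complete_current_request(u16 cmdid, u16 status)
{
    RefPtr<AsyncBlockDeviceRequest> request;
    {
        SpinlockLocker lock(m_request_lock);
        auto it = m_requests.find(cmdid);
        VERIFY(it != m_requests.end());
        request = it->value.request;
        if (it->value.end_io_handler)
            it->value.end_io_handler(status);               // e.g. wake a waiter in submit_sync_sqe()
        m_requests.set(cmdid, { nullptr, false, nullptr }); // the command id may now be reused
    }
    if (request)
        request->complete(status ? AsyncDeviceRequest::Failure : AsyncDeviceRequest::Success);
}

In the real driver, an interrupt-driven queue would likely have to defer request->complete() out of IRQ context; the sketch ignores that detail.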
@@ -112,6 +109,14 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
     u16 cid = get_request_cid();
     sub.cmdid = cid;
 
+    {
+        SpinlockLocker req_lock(m_request_lock);
+
+        if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
+            VERIFY_NOT_REACHED();
+        m_requests.set(sub.cmdid, { nullptr, true, nullptr });
+    }
+
     submit_sqe(sub);
     do {
         int index;
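In this hunk the synchronous path registers the command with a null endio callback and still polls for completion in the do/while loop that follows. The commit message notes that the callback is meant to let a sync request sleep until it is woken at the end; a hypothetical version of that (not part of this commit, and assuming a hypothetical WaitQueue member named m_sync_wait_queue) might look like the following.

// Hypothetical follow-up, assuming the completion path invokes end_io_handler
// before releasing the slot: block on a wait queue instead of polling.
u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
{
    sub.cmdid = get_request_cid();
    u16 cmd_status = 0;
    {
        SpinlockLocker req_lock(m_request_lock);
        m_requests.set(sub.cmdid, { nullptr, true, [this, &cmd_status](u16 status) {
                                       cmd_status = status;
                                       m_sync_wait_queue.wake_all(); // completion side wakes the waiter
                                   } });
    }
    full_memory_barrier();
    submit_sqe(sub);
    // Caveat: a real implementation has to make sure this wakeup cannot be lost
    // if the completion fires before the thread has gone to sleep.
    m_sync_wait_queue.wait_forever("NVMe sync submit"sv);
    return cmd_status;
}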
@@ -132,9 +137,6 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
 void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
 {
     NVMeSubmission sub {};
-    SpinlockLocker m_lock(m_request_lock);
-    m_current_request = request;
-
     sub.op = OP_NVME_READ;
     sub.rw.nsid = nsid;
     sub.rw.slba = AK::convert_between_host_and_little_endian(index);
@@ -143,6 +145,13 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
     sub.cmdid = get_request_cid();
 
+    {
+        SpinlockLocker req_lock(m_request_lock);
+        if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
+            VERIFY_NOT_REACHED();
+        m_requests.set(sub.cmdid, { request, true, nullptr });
+    }
+
     full_memory_barrier();
     submit_sqe(sub);
 }
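The same register-then-submit block appears in submit_sync_sqe(), read() and, below, write(). A hypothetical helper (not in the commit) makes the invariant explicit: a command id may only be reused once its previous IO is no longer marked as used.

// Hypothetical refactor of the repeated registration block; names follow the diff.
void NVMeQueue::track_request(u16 cmdid, RefPtr<AsyncBlockDeviceRequest> request, Function<void(u16)> end_io)
{
    SpinlockLocker req_lock(m_request_lock);
    // Never reuse a command id whose previous IO has not completed yet.
    if (m_requests.contains(cmdid) && m_requests.get(cmdid).release_value().used)
        VERIFY_NOT_REACHED();
    m_requests.set(cmdid, { move(request), true, move(end_io) });
}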
@@ -150,13 +159,7 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
 void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count)
 {
     NVMeSubmission sub {};
-    SpinlockLocker m_lock(m_request_lock);
-    m_current_request = request;
 
-    if (auto result = m_current_request->read_from_buffer(m_current_request->buffer(), m_rw_dma_region->vaddr().as_ptr(), m_current_request->buffer_size()); result.is_error()) {
-        complete_current_request(AsyncDeviceRequest::MemoryFault);
-        return;
-    }
     sub.op = OP_NVME_WRITE;
     sub.rw.nsid = nsid;
     sub.rw.slba = AK::convert_between_host_and_little_endian(index);
@@ -165,6 +168,18 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
     sub.cmdid = get_request_cid();
 
+    {
+        SpinlockLocker req_lock(m_request_lock);
+        if (m_requests.contains(sub.cmdid) && m_requests.get(sub.cmdid).release_value().used)
+            VERIFY_NOT_REACHED();
+        m_requests.set(sub.cmdid, { request, true, nullptr });
+    }
+
+    if (auto result = request.read_from_buffer(request.buffer(), m_rw_dma_region->vaddr().as_ptr(), request.buffer_size()); result.is_error()) {
+        complete_current_request(sub.cmdid, AsyncDeviceRequest::MemoryFault);
+        return;
+    }
+
     full_memory_barrier();
     submit_sqe(sub);
 }