
Kernel/NVMe: Use an Atomic for command id instead of sq index

Using sq_tail as the cid makes an inherent assumption that we send only
one IO at a time. Use an atomic variable instead for the command id of
a submission queue entry.

As sq_tail is no longer used as the cid, remove m_prev_sq_tail, which
held the last used sq_tail value.
Pankaj Raghav 2023-03-29 10:25:04 +02:00 committed by Jelle Raaijmakers
parent e219662ce0
commit 3fe7bda021
2 changed files with 18 additions and 7 deletions
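
The new scheme hands out command ids from a shared atomic counter that wraps at the queue depth, so concurrent submitters each get a distinct id without relying on the submission queue tail. A minimal userspace sketch of the idea, with std::atomic standing in for the kernel's AK::Atomic (the struct, its names, and the default depth are illustrative, not SerenityOS code):

    #include <atomic>
    #include <cstdint>

    struct CidAllocator {
        std::atomic<uint32_t> m_tag { 0 };
        uint32_t m_qdepth { 64 };

        uint32_t get_request_cid()
        {
            uint32_t expected_tag = m_tag.load(std::memory_order_acquire);
            for (;;) {
                // Advance by one and wrap at the queue depth, keeping
                // ids in [0, m_qdepth).
                uint32_t cid = expected_tag + 1;
                if (cid == m_qdepth)
                    cid = 0;
                // If another submitter won the race, compare_exchange_strong
                // refreshes expected_tag with the current value and we retry.
                if (m_tag.compare_exchange_strong(expected_tag, cid, std::memory_order_acquire))
                    return cid;
            }
        }
    };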

Kernel/Storage/NVMe/NVMeQueue.cpp

@@ -75,8 +75,6 @@ u32 NVMeQueue::process_cq()
         if (m_admin_queue == false) {
             // As the block layer calls are now sync (as we wait on each requests),
             // everything is operated on a single request similar to BMIDE driver.
-            // TODO: Remove this constraint eventually.
-            VERIFY(cmdid == m_prev_sq_tail);
             if (m_current_request) {
                 complete_current_request(status);
             }
@@ -92,9 +90,6 @@ u32 NVMeQueue::process_cq()
 void NVMeQueue::submit_sqe(NVMeSubmission& sub)
 {
     SpinlockLocker lock(m_sq_lock);
-    // For now let's use sq tail as a unique command id.
-    sub.cmdid = m_sq_tail;
-    m_prev_sq_tail = m_sq_tail;
     memcpy(&m_sqe_array[m_sq_tail], &sub, sizeof(NVMeSubmission));
     {
@@ -114,7 +109,8 @@ u16 NVMeQueue::submit_sync_sqe(NVMeSubmission& sub)
 {
     // For now let's use sq tail as a unique command id.
     u16 cqe_cid;
-    u16 cid = m_sq_tail;
+    u16 cid = get_request_cid();
+    sub.cmdid = cid;
     submit_sqe(sub);
     do {
@@ -145,6 +141,7 @@ void NVMeQueue::read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     // No. of lbas is 0 based
     sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
+    sub.cmdid = get_request_cid();
     full_memory_barrier();
     submit_sqe(sub);
@@ -166,6 +163,7 @@ void NVMeQueue::write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32
     // No. of lbas is 0 based
     sub.rw.length = AK::convert_between_host_and_little_endian((count - 1) & 0xFFFF);
     sub.rw.data_ptr.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(m_rw_dma_page->paddr().as_ptr()));
+    sub.cmdid = get_request_cid();
     full_memory_barrier();
     submit_sqe(sub);
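
With submit_sqe() no longer stamping the command id, each submission path above (submit_sync_sqe(), read(), write()) now sets sub.cmdid itself via get_request_cid() before queueing. This matches the NVMe requirement that a command identifier be unique among commands outstanding on a submission queue; cycling through a queue depth's worth of values keeps ids distinct while at most that many commands are in flight.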

Kernel/Storage/NVMe/NVMeQueue.h

@@ -44,6 +44,19 @@ protected:
     }
     NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+
+    [[nodiscard]] u32 get_request_cid()
+    {
+        u32 expected_tag = m_tag.load(AK::memory_order_acquire);
+        for (;;) {
+            u32 cid = expected_tag + 1;
+            if (cid == m_qdepth)
+                cid = 0;
+            if (m_tag.compare_exchange_strong(expected_tag, cid, AK::memory_order_acquire))
+                return cid;
+        }
+    }
+
 private:
     bool cqe_available();
     void update_cqe_head();
@@ -63,10 +76,10 @@ private:
     u16 m_qid {};
     u8 m_cq_valid_phase { 1 };
     u16 m_sq_tail {};
-    u16 m_prev_sq_tail {};
     u16 m_cq_head {};
     bool m_admin_queue { false };
     u32 m_qdepth {};
+    Atomic<u32> m_tag { 0 }; // used for the cid in a submission queue entry
     Spinlock<LockRank::Interrupts> m_sq_lock {};
     OwnPtr<Memory::Region> m_cq_dma_region;
     Vector<NonnullRefPtr<Memory::PhysicalPage>> m_cq_dma_page;
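
As a quick sanity check of the wrap-around behavior, a hypothetical exercise of the CidAllocator sketch above (not part of the commit):

    // With a queue depth of 4 this prints "1 2 3 0 1 2": ids wrap at
    // the depth and only repeat after a full cycle of allocations.
    #include <cstdio>

    int main()
    {
        CidAllocator allocator;
        allocator.m_qdepth = 4;
        for (int i = 0; i < 6; ++i)
            std::printf("%u ", allocator.get_request_cid());
        std::printf("\n");
        return 0;
    }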