/*
 * Copyright (c) 2021, Pankaj R
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/AtomicRefCounted.h>
#include <AK/HashMap.h>
#include <AK/OwnPtr.h>
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/Library/LockRefPtr.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/TypedMapping.h>
#include <Kernel/Storage/NVMe/NVMeDefinitions.h>
#include <Kernel/Tasks/WaitQueue.h>

namespace Kernel {

struct DoorbellRegister {
    u32 sq_tail;
    u32 cq_head;
};

struct Doorbell {
    Memory::TypedMapping<DoorbellRegister volatile> mmio_reg;
    Memory::TypedMapping<DoorbellRegister> dbbuf_shadow;
    Memory::TypedMapping<DoorbellRegister> dbbuf_eventidx;
};

enum class QueueType {
    Polled,
    IRQ
};

class AsyncBlockDeviceRequest;

struct NVMeIO {
    void clear()
    {
        request = nullptr;
        end_io_handler = nullptr;
    }
    RefPtr<AsyncBlockDeviceRequest> request;
    Function<void(u16 status)> end_io_handler;
};

class NVMeController;
class NVMeQueue : public AtomicRefCounted<NVMeQueue> {
public:
    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(NVMeController& device, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs, QueueType queue_type);
    bool is_admin_queue() { return m_admin_queue; }
    u16 submit_sync_sqe(NVMeSubmission&);
    void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
    void write(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
    virtual void submit_sqe(NVMeSubmission&);
    virtual ~NVMeQueue();

protected:
    u32 process_cq();

    // Updates the shadow buffer and returns whether an MMIO doorbell write is still needed.
    bool update_shadow_buf(u16 new_value, u32* dbbuf, u32* ei)
    {
        u32 const old = *dbbuf;

        *dbbuf = new_value;
        AK::full_memory_barrier();
        bool need_mmio = static_cast<u16>(new_value - *ei - 1) < static_cast<u16>(new_value - old);

        return need_mmio;
    }

    void update_sq_doorbell()
    {
        full_memory_barrier();
        if (m_db_regs.dbbuf_shadow.paddr.is_null()
            || update_shadow_buf(m_sq_tail, &m_db_regs.dbbuf_shadow->sq_tail, &m_db_regs.dbbuf_eventidx->sq_tail))
            m_db_regs.mmio_reg->sq_tail = m_sq_tail;
    }

    NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs);

    [[nodiscard]] u32 get_request_cid()
    {
        u32 expected_tag = m_tag.load(AK::memory_order_acquire);

        for (;;) {
            u32 cid = expected_tag + 1;
            if (cid == m_qdepth)
                cid = 0;
            if (m_tag.compare_exchange_strong(expected_tag, cid, AK::memory_order_acquire))
                return cid;
        }
    }

    virtual void complete_current_request(u16 cmdid, u16 status);

private:
    bool cqe_available();
    void update_cqe_head();

    void update_cq_doorbell()
    {
        full_memory_barrier();
        if (m_db_regs.dbbuf_shadow.paddr.is_null()
            || update_shadow_buf(m_cq_head, &m_db_regs.dbbuf_shadow->cq_head, &m_db_regs.dbbuf_eventidx->cq_head))
            m_db_regs.mmio_reg->cq_head = m_cq_head;
    }

protected:
    SpinlockProtected<HashMap<u16, NVMeIO>, LockRank::None> m_requests;
    NonnullOwnPtr<Memory::Region> m_rw_dma_region;

private:
    u16 m_qid {};
    u8 m_cq_valid_phase { 1 };
    u16 m_sq_tail {};
    u16 m_cq_head {};
    bool m_admin_queue { false };
    u32 m_qdepth {};
    Atomic<u32> m_tag { 0 }; // used for the cid in a submission queue entry
    Spinlock<LockRank::Interrupts> m_sq_lock {};
    OwnPtr<Memory::Region> m_cq_dma_region;
    Span<NVMeSubmission> m_sqe_array;
    OwnPtr<Memory::Region> m_sq_dma_region;
    Span<NVMeCompletion> m_cqe_array;
    WaitQueue m_sync_wait_queue;
    Doorbell m_db_regs;
    NonnullRefPtr<Memory::PhysicalPage const> m_rw_dma_page;
};

}
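
// Worked example for the shadow-doorbell check in update_shadow_buf() above
// (the numeric values are purely illustrative; the casts make the arithmetic
// mod 2^16, matching the width of the queue indices):
//
//     need_mmio = static_cast<u16>(new_value - *ei - 1)
//               < static_cast<u16>(new_value - old);
//
// This is true exactly when the controller's event index (*ei) lies in the
// half-open window [old, new_value) that this doorbell update advances over,
// i.e. when the controller asked to be notified before it would next re-read
// the shadow buffer on its own:
//
//     old = 5, new_value = 8, *ei = 6:
//         (u16)(8 - 6 - 1) = 1 < (u16)(8 - 5) = 3
//         -> event index is inside [5, 8), so an MMIO doorbell write is needed.
//     old = 5, new_value = 8, *ei = 2:
//         (u16)(8 - 2 - 1) = 5 < 3 is false
//         -> the controller will pick the update up from the shadow buffer,
//            and the MMIO write can be skipped.
//
// The unsigned u16 subtractions keep the window comparison correct when the
// queue indices wrap around past 0xFFFF.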