mirror of
https://github.com/RGBCube/serenity
synced 2025-05-25 12:45:06 +00:00
Kernel: Fix NVMe register access
We need to use the volatile keyword when mapping the device registers, or the compiler may optimize accesses, which leads to this QEMU error: pci_nvme_ub_mmiord_toosmall in nvme_mmio_read: MMIO read smaller than 32-bits, offset=0x0
This commit is contained in:
parent
c4b78bee45
commit
d1e7b69004
4 changed files with 9 additions and 9 deletions
|
@ -45,7 +45,7 @@ ErrorOr<void> NVMeController::initialize()
|
||||||
|
|
||||||
// Map only until doorbell register for the controller
|
// Map only until doorbell register for the controller
|
||||||
// Queues will individually map the doorbell register respectively
|
// Queues will individually map the doorbell register respectively
|
||||||
m_controller_regs = Memory::map_typed_writable<ControllerRegister>(PhysicalAddress(m_bar));
|
m_controller_regs = Memory::map_typed_writable<volatile ControllerRegister>(PhysicalAddress(m_bar));
|
||||||
|
|
||||||
calculate_doorbell_stride();
|
calculate_doorbell_stride();
|
||||||
TRY(create_admin_queue(irq));
|
TRY(create_admin_queue(irq));
|
||||||
|
@ -250,7 +250,7 @@ ErrorOr<void> NVMeController::create_admin_queue(u8 irq)
|
||||||
auto buffer = TRY(MM.allocate_dma_buffer_pages(sq_size, "Admin SQ queue", Memory::Region::Access::ReadWrite, sq_dma_pages));
|
auto buffer = TRY(MM.allocate_dma_buffer_pages(sq_size, "Admin SQ queue", Memory::Region::Access::ReadWrite, sq_dma_pages));
|
||||||
sq_dma_region = move(buffer);
|
sq_dma_region = move(buffer);
|
||||||
}
|
}
|
||||||
auto doorbell_regs = Memory::map_typed_writable<DoorbellRegister>(PhysicalAddress(m_bar + REG_SQ0TDBL_START));
|
auto doorbell_regs = Memory::map_typed_writable<volatile DoorbellRegister>(PhysicalAddress(m_bar + REG_SQ0TDBL_START));
|
||||||
|
|
||||||
m_admin_queue = TRY(NVMeQueue::try_create(0, irq, qdepth, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs)));
|
m_admin_queue = TRY(NVMeQueue::try_create(0, irq, qdepth, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs)));
|
||||||
|
|
||||||
|
@ -317,7 +317,7 @@ ErrorOr<void> NVMeController::create_io_queue(u8 irq, u8 qid)
|
||||||
}
|
}
|
||||||
|
|
||||||
auto queue_doorbell_offset = REG_SQ0TDBL_START + ((2 * qid) * (4 << m_dbl_stride));
|
auto queue_doorbell_offset = REG_SQ0TDBL_START + ((2 * qid) * (4 << m_dbl_stride));
|
||||||
auto doorbell_regs = Memory::map_typed_writable<DoorbellRegister>(PhysicalAddress(m_bar + queue_doorbell_offset));
|
auto doorbell_regs = Memory::map_typed_writable<volatile DoorbellRegister>(PhysicalAddress(m_bar + queue_doorbell_offset));
|
||||||
|
|
||||||
m_queues.append(TRY(NVMeQueue::try_create(qid, irq, IO_QUEUE_SIZE, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs))));
|
m_queues.append(TRY(NVMeQueue::try_create(qid, irq, IO_QUEUE_SIZE, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs))));
|
||||||
m_queues.last().enable_interrupts();
|
m_queues.last().enable_interrupts();
|
||||||
|
|
|
@ -69,7 +69,7 @@ private:
|
||||||
RefPtr<NVMeQueue> m_admin_queue;
|
RefPtr<NVMeQueue> m_admin_queue;
|
||||||
NonnullRefPtrVector<NVMeQueue> m_queues;
|
NonnullRefPtrVector<NVMeQueue> m_queues;
|
||||||
NonnullRefPtrVector<NVMeNameSpace> m_namespaces;
|
NonnullRefPtrVector<NVMeNameSpace> m_namespaces;
|
||||||
Memory::TypedMapping<ControllerRegister> m_controller_regs;
|
Memory::TypedMapping<volatile ControllerRegister> m_controller_regs;
|
||||||
bool m_admin_queue_ready { false };
|
bool m_admin_queue_ready { false };
|
||||||
size_t m_device_count {};
|
size_t m_device_count {};
|
||||||
u32 m_bar;
|
u32 m_bar;
|
||||||
|
|
|
@ -13,14 +13,14 @@
|
||||||
|
|
||||||
namespace Kernel {
|
namespace Kernel {
|
||||||
|
|
||||||
ErrorOr<NonnullRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister> db_regs)
|
ErrorOr<NonnullRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
|
||||||
{
|
{
|
||||||
auto queue = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) NVMeQueue(qid, irq, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
|
auto queue = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) NVMeQueue(qid, irq, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
|
||||||
TRY(queue->create());
|
TRY(queue->create());
|
||||||
return queue;
|
return queue;
|
||||||
}
|
}
|
||||||
|
|
||||||
NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister> db_regs)
|
NVMeQueue::NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs)
|
||||||
: IRQHandler(irq)
|
: IRQHandler(irq)
|
||||||
, m_qid(qid)
|
, m_qid(qid)
|
||||||
, m_admin_queue(qid == 0)
|
, m_admin_queue(qid == 0)
|
||||||
|
|
|
@ -29,9 +29,9 @@ class AsyncBlockDeviceRequest;
|
||||||
class NVMeQueue : public IRQHandler
|
class NVMeQueue : public IRQHandler
|
||||||
, public RefCounted<NVMeQueue> {
|
, public RefCounted<NVMeQueue> {
|
||||||
public:
|
public:
|
||||||
static ErrorOr<NonnullRefPtr<NVMeQueue>> try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister> db_regs);
|
static ErrorOr<NonnullRefPtr<NVMeQueue>> try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
|
||||||
ErrorOr<void> create();
|
ErrorOr<void> create();
|
||||||
explicit NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister> db_regs);
|
explicit NVMeQueue(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<volatile DoorbellRegister> db_regs);
|
||||||
bool is_admin_queue() { return m_admin_queue; };
|
bool is_admin_queue() { return m_admin_queue; };
|
||||||
bool handle_irq(const RegisterState&) override;
|
bool handle_irq(const RegisterState&) override;
|
||||||
void submit_sqe(struct NVMeSubmission&);
|
void submit_sqe(struct NVMeSubmission&);
|
||||||
|
@ -73,7 +73,7 @@ private:
|
||||||
NonnullRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
|
NonnullRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
|
||||||
Span<NVMeCompletion> m_cqe_array;
|
Span<NVMeCompletion> m_cqe_array;
|
||||||
OwnPtr<Memory::Region> m_rw_dma_region;
|
OwnPtr<Memory::Region> m_rw_dma_region;
|
||||||
Memory::TypedMapping<DoorbellRegister> m_db_regs;
|
Memory::TypedMapping<volatile DoorbellRegister> m_db_regs;
|
||||||
RefPtr<Memory::PhysicalPage> m_rw_dma_page;
|
RefPtr<Memory::PhysicalPage> m_rw_dma_page;
|
||||||
Spinlock m_request_lock;
|
Spinlock m_request_lock;
|
||||||
RefPtr<AsyncBlockDeviceRequest> m_current_request;
|
RefPtr<AsyncBlockDeviceRequest> m_current_request;
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue