NVMe: Use an explicit Queue type instead of using an Optional irq
Add an explicit QueueType enum that can be used to create either a polled or an interrupt-driven queue. This is clearer than overloading an Optional<u8> irq parameter to carry both the IRQ number and the queue kind. This refactoring is in preparation for adding MSI-X support to NVMe.
parent feb48cbc7c
commit 693e3419b7
4 changed files with 20 additions and 14 deletions
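In short: instead of encoding "polled vs. interrupt-driven" in whether an Optional<u8> irq holds a value, the queue kind is now an explicit enum and the IRQ number is passed separately. A minimal standalone sketch of the pattern (assumed, simplified names; std::unique_ptr stands in for the kernel's NonnullLockRefPtr, and the real factory is NVMeQueue::try_create in the diff below):

#include <cstdint>
#include <memory>

// Explicit queue kind, mirroring the QueueType enum the commit adds to NVMeQueue.h.
enum class QueueType {
    Polled, // no interrupt line; completions are polled
    IRQ     // pin-based interrupt for now; MSI-X is the eventual goal
};

struct Queue {
    virtual ~Queue() = default;
};
struct PollQueue : Queue { };
struct InterruptQueue : Queue {
    explicit InterruptQueue(uint8_t irq)
        : m_irq(irq)
    {
    }
    uint8_t m_irq;
};

// The factory dispatches on the enum rather than on Optional::has_value(),
// so the IRQ number no longer doubles as the queue-kind flag.
std::unique_ptr<Queue> try_create_queue(uint8_t irq, QueueType queue_type)
{
    if (queue_type == QueueType::Polled)
        return std::make_unique<PollQueue>();
    return std::make_unique<InterruptQueue>(irq);
}

int main()
{
    auto polled = try_create_queue(0, QueueType::Polled);   // IRQ value unused
    auto irq_driven = try_create_queue(11, QueueType::IRQ); // uses line 11
}

A side effect of the explicit enum is that a future queue kind (say, an MSI-X vector) becomes one more enumerator instead of yet another sentinel encoding in the IRQ parameter.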
Kernel/Storage/NVMe/NVMeController.cpp

@@ -36,7 +36,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::initialize(bool is_queue_polled)
 {
     // Nr of queues = one queue per core
     auto nr_of_queues = Processor::count();
-    auto irq = is_queue_polled ? Optional<u8> {} : device_identifier().interrupt_line().value();
+    auto queue_type = is_queue_polled ? QueueType::Polled : QueueType::IRQ;

     PCI::enable_memory_space(device_identifier());
     PCI::enable_bus_mastering(device_identifier());
@@ -52,7 +52,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::initialize(bool is_queue_polled)
     m_ready_timeout = Time::from_milliseconds((CAP_TO(caps) + 1) * 500); // CAP.TO is in 500ms units

     calculate_doorbell_stride();
-    TRY(create_admin_queue(irq));
+    TRY(create_admin_queue(queue_type));
     VERIFY(m_admin_queue_ready == true);

     VERIFY(IO_QUEUE_SIZE < MQES(caps));
@@ -61,7 +61,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::initialize(bool is_queue_polled)
     // Create an IO queue per core
     for (u32 cpuid = 0; cpuid < nr_of_queues; ++cpuid) {
         // qid is zero is used for admin queue
-        TRY(create_io_queue(cpuid + 1, irq));
+        TRY(create_io_queue(cpuid + 1, queue_type));
     }
     TRY(identify_and_init_namespaces());
     return {};
@@ -249,7 +249,7 @@ void NVMeController::complete_current_request([[maybe_unused]] AsyncDeviceRequest::RequestResult result)
     VERIFY_NOT_REACHED();
 }

-UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> irq)
+UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(QueueType queue_type)
 {
     auto qdepth = get_admin_q_dept();
     OwnPtr<Memory::Region> cq_dma_region;
@@ -287,13 +287,13 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> irq)
         return maybe_error;
     }
     set_admin_queue_ready_flag();
-    m_admin_queue = TRY(NVMeQueue::try_create(0, irq, qdepth, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs)));
+    m_admin_queue = TRY(NVMeQueue::try_create(0, device_identifier().interrupt_line().value(), qdepth, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs), queue_type));

     dbgln_if(NVME_DEBUG, "NVMe: Admin queue created");
     return {};
 }

-UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<u8> irq)
+UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, QueueType queue_type)
 {
     OwnPtr<Memory::Region> cq_dma_region;
     Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_pages;
@@ -323,7 +323,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<u8> irq)
     sub.create_cq.cqid = qid;
     // The queue size is 0 based
     sub.create_cq.qsize = AK::convert_between_host_and_little_endian(IO_QUEUE_SIZE - 1);
-    auto flags = irq.has_value() ? QUEUE_IRQ_ENABLED : QUEUE_IRQ_DISABLED;
+    auto flags = (queue_type == QueueType::IRQ) ? QUEUE_IRQ_ENABLED : QUEUE_IRQ_DISABLED;
     flags |= QUEUE_PHY_CONTIGUOUS;
     // TODO: Eventually move to MSI.
     // For now using pin based interrupts. Clear the first 16 bits
@@ -347,7 +347,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<u8> irq)
     auto queue_doorbell_offset = REG_SQ0TDBL_START + ((2 * qid) * (4 << m_dbl_stride));
     auto doorbell_regs = TRY(Memory::map_typed_writable<DoorbellRegister volatile>(PhysicalAddress(m_bar + queue_doorbell_offset)));

-    m_queues.append(TRY(NVMeQueue::try_create(qid, irq, IO_QUEUE_SIZE, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs))));
+    m_queues.append(TRY(NVMeQueue::try_create(qid, device_identifier().interrupt_line().value(), IO_QUEUE_SIZE, move(cq_dma_region), cq_dma_pages, move(sq_dma_region), sq_dma_pages, move(doorbell_regs), queue_type)));
     dbgln_if(NVME_DEBUG, "NVMe: Created IO Queue with QID{}", m_queues.size());
     return {};
 }
Kernel/Storage/NVMe/NVMeController.h

@@ -59,8 +59,8 @@ private:

     ErrorOr<void> identify_and_init_namespaces();
     Tuple<u64, u8> get_ns_features(IdentifyNamespace& identify_data_struct);
-    ErrorOr<void> create_admin_queue(Optional<u8> irq);
-    ErrorOr<void> create_io_queue(u8 qid, Optional<u8> irq);
+    ErrorOr<void> create_admin_queue(QueueType queue_type);
+    ErrorOr<void> create_io_queue(u8 qid, QueueType queue_type);
     void calculate_doorbell_stride()
     {
         m_dbl_stride = (m_controller_regs->cap >> CAP_DBL_SHIFT) & CAP_DBL_MASK;
@@ -77,6 +77,7 @@ private:
     AK::Time m_ready_timeout;
     u32 m_bar { 0 };
     u8 m_dbl_stride { 0 };
+    QueueType m_queue_type { QueueType::IRQ };
     static Atomic<u8> s_controller_id;
 };
 }
Kernel/Storage/NVMe/NVMeQueue.cpp

@@ -12,16 +12,16 @@
 #include <Kernel/Storage/NVMe/NVMeQueue.h>

 namespace Kernel {
-ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs, QueueType queue_type)
 {
     // Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
     RefPtr<Memory::PhysicalPage> rw_dma_page;
     auto rw_dma_region = TRY(MM.allocate_dma_buffer_page("NVMe Queue Read/Write DMA"sv, Memory::Region::Access::ReadWrite, rw_dma_page));
-    if (!irq.has_value()) {
+    if (queue_type == QueueType::Polled) {
         auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMePollQueue(move(rw_dma_region), *rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
         return queue;
     }
-    auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMeInterruptQueue(move(rw_dma_region), *rw_dma_page, qid, irq.value(), q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
+    auto queue = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) NVMeInterruptQueue(move(rw_dma_region), *rw_dma_page, qid, irq, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))));
     return queue;
 }
Kernel/Storage/NVMe/NVMeQueue.h

@@ -26,6 +26,11 @@ struct DoorbellRegister {
     u32 cq_head;
 };

+enum class QueueType {
+    Polled,
+    IRQ
+};
+
 class AsyncBlockDeviceRequest;

 struct NVMeIO {
@@ -36,7 +41,7 @@ struct NVMeIO {

 class NVMeQueue : public AtomicRefCounted<NVMeQueue> {
 public:
-    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs, QueueType queue_type);
     bool is_admin_queue() { return m_admin_queue; };
     u16 submit_sync_sqe(NVMeSubmission&);
     void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);