
Kernel/Storage: Don't allocate IRQs in NVMeCntlr when nvme_poll passed

Sönke Holz 2024-02-22 16:50:55 +01:00 committed by Andrew Kaster
parent 0f54d797d2
commit cdc0c9f094
4 changed files with 15 additions and 8 deletions
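In short: when the controller is created in polled mode (the nvme_poll kernel parameter), no IRQs are reserved or allocated at all, and the IRQ bookkeeping becomes Optional so the interrupt-only paths can check for a value first. A minimal standalone sketch of the pattern this commit applies (using std::optional in place of AK::Optional; Controller, initialize and the vector numbers here are illustrative, not the kernel's actual code):

    #include <cstdio>
    #include <optional>

    enum class QueueType { Polled, IRQ };

    // Illustrative stand-in for the controller's IRQ bookkeeping: the field is
    // only populated when the queue type actually uses interrupts.
    struct Controller {
        std::optional<int> irq_type;

        void initialize(QueueType queue_type)
        {
            if (queue_type == QueueType::IRQ)
                irq_type = 5; // stand-in for TRY(reserve_irqs(nr_of_queues + 1, true))
            // In polled mode irq_type stays empty, so later code cannot
            // accidentally consume an IRQ that was never reserved.
        }

        void create_io_queue(int qid)
        {
            // Same shape as the patched create_io_queue(): only an MSI-X-style
            // setup uses qid as the interrupt-table index; otherwise use 0.
            // (The real code also special-cases pin-based interrupts.)
            int irq_vector = irq_type.has_value() ? qid : 0;
            std::printf("io queue %d -> irq vector %d\n", qid, irq_vector);
        }
    };

    int main()
    {
        Controller polled;
        polled.initialize(QueueType::Polled);
        polled.create_io_queue(1); // io queue 1 -> irq vector 0

        Controller irq_based;
        irq_based.initialize(QueueType::IRQ);
        irq_based.create_io_queue(1); // io queue 1 -> irq vector 1
    }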

NVMeController.cpp

@@ -54,8 +54,10 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::initialize(bool is_queue_polled)
     m_ready_timeout = Duration::from_milliseconds((CAP_TO(caps) + 1) * 500); // CAP.TO is in 500ms units
     calculate_doorbell_stride();
 
-    // IO queues + 1 admin queue
-    m_irq_type = TRY(reserve_irqs(nr_of_queues + 1, true));
+    if (queue_type == QueueType::IRQ) {
+        // IO queues + 1 admin queue
+        m_irq_type = TRY(reserve_irqs(nr_of_queues + 1, true));
+    }
 
     TRY(create_admin_queue(queue_type));
     VERIFY(m_admin_queue_ready == true);
@@ -351,7 +353,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(QueueType queue_type)
     m_controller_regs->acq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first()->paddr().as_ptr()));
     m_controller_regs->asq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first()->paddr().as_ptr()));
 
-    auto irq = TRY(allocate_irq(0)); // Admin queue always uses the 0th index when using MSIx
+    Optional<u8> irq;
+    if (queue_type == QueueType::IRQ)
+        irq = TRY(allocate_irq(0)); // Admin queue always uses the 0th index when using MSIx
 
     maybe_error = start_controller();
     if (maybe_error.is_error()) {
@@ -398,7 +402,10 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, QueueType queue_type)
     auto flags = (queue_type == QueueType::IRQ) ? QUEUE_IRQ_ENABLED : QUEUE_IRQ_DISABLED;
     flags |= QUEUE_PHY_CONTIGUOUS;
     // When using MSIx interrupts, qid is used as an index into the interrupt table
-    sub.create_cq.irq_vector = (m_irq_type == PCI::InterruptType::PIN) ? 0 : qid;
+    if (m_irq_type.has_value() && m_irq_type.value() != PCI::InterruptType::PIN)
+        sub.create_cq.irq_vector = qid;
+    else
+        sub.create_cq.irq_vector = 0;
     sub.create_cq.cq_flags = AK::convert_between_host_and_little_endian(flags & 0xFFFF);
     submit_admin_command(sub, true);
 }

NVMeController.h

@@ -80,7 +80,7 @@ private:
     AK::Duration m_ready_timeout;
     PhysicalAddress m_bar { 0 };
     u8 m_dbl_stride { 0 };
-    PCI::InterruptType m_irq_type;
+    Optional<PCI::InterruptType> m_irq_type;
     QueueType m_queue_type { QueueType::IRQ };
     static Atomic<u8> s_controller_id;
 };

NVMeQueue.cpp

@@ -12,7 +12,7 @@
 #include <Kernel/Library/StdLib.h>
 
 namespace Kernel {
-ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(NVMeController& device, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs, QueueType queue_type)
+ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(NVMeController& device, u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs, QueueType queue_type)
 {
     // Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
     RefPtr<Memory::PhysicalPage> rw_dma_page;
@@ -26,7 +26,7 @@ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(NVMeController& device, u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs, QueueType queue_type)
         return queue;
     }
 
-    auto queue = NVMeInterruptQueue::try_create(device, move(rw_dma_region), rw_dma_page.release_nonnull(), qid, irq, q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs));
+    auto queue = NVMeInterruptQueue::try_create(device, move(rw_dma_region), rw_dma_page.release_nonnull(), qid, irq.release_value(), q_depth, move(cq_dma_region), move(sq_dma_region), move(db_regs));
     return queue;
 }

NVMeQueue.h

@@ -52,7 +52,7 @@ struct NVMeIO {
 class NVMeController;
 class NVMeQueue : public AtomicRefCounted<NVMeQueue> {
 public:
-    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(NVMeController& device, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs, QueueType queue_type);
+    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(NVMeController& device, u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, OwnPtr<Memory::Region> sq_dma_region, Doorbell db_regs, QueueType queue_type);
     bool is_admin_queue() { return m_admin_queue; }
     u16 submit_sync_sqe(NVMeSubmission&);
     void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
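The Optional<u8> parameter makes the new try_create() contract visible in the signature: polled queues pass an empty Optional, and only the interrupt branch consumes the value with release_value(), which is safe there because IRQ-type queues always allocated a vector first. A small sketch of that contract (standalone C++ with std::optional standing in for AK::Optional; create_queue and the assert are illustrative, not kernel code):

    #include <cassert>
    #include <optional>

    enum class QueueType { Polled, IRQ };

    // Illustrative model of the updated try_create() contract: the irq
    // argument is only meaningful (and only consumed) on the interrupt path.
    int create_queue(QueueType queue_type, std::optional<int> irq)
    {
        if (queue_type == QueueType::Polled)
            return 0; // polled queues never look at irq

        // Interrupt path: the caller must have allocated a vector, so taking
        // the value out (the analogue of Optional::release_value(), which
        // verifies that a value is present) is safe.
        assert(irq.has_value());
        return *irq;
    }

    int main()
    {
        create_queue(QueueType::Polled, std::nullopt); // fine: irq is unused
        create_queue(QueueType::IRQ, 3);               // fine: vector consumed
        // create_queue(QueueType::IRQ, std::nullopt) would trip the assert,
        // just as release_value() would VERIFY in the kernel.
    }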