Everywhere: Stop using NonnullRefPtrVector
This class had slightly confusing semantics and the added weirdness doesn't seem worth it just so we can say "." instead of "->" when iterating over a vector of NNRPs. This patch replaces NonnullRefPtrVector<T> with Vector<NNRP<T>>.
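For illustration, here is a minimal sketch (not part of the commit) of what the change means at a call site, using a toy RefCounted Page type in place of the kernel's real classes:

#include <AK/Format.h>
#include <AK/NonnullRefPtr.h>
#include <AK/RefCounted.h>
#include <AK/Vector.h>

struct Page : public RefCounted<Page> {
    int number { 0 };
};

void dump_pages(Vector<NonnullRefPtr<Page>> const& pages)
{
    // NonnullRefPtrVector<Page> dereferenced the smart pointer during
    // iteration, so the loop body used to read "page.number". With a
    // plain Vector<NonnullRefPtr<Page>>, each element is the smart
    // pointer itself and must be dereferenced explicitly with "->".
    for (auto const& page : pages)
        dbgln("page number: {}", page->number);
}

The same mechanical substitution explains every hunk below: the container types change, and element accesses such as m_command_table_pages[i].paddr() or cq_dma_pages.first().paddr() gain an explicit ->.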
This commit is contained in:
parent 104be6c8ac
commit 8a48246ed1
168 changed files with 1280 additions and 1280 deletions

@@ -210,7 +210,7 @@ void AHCIPort::eject()
     auto unused_command_header = try_to_find_unused_command_header();
     VERIFY(unused_command_header.has_value());
     auto* command_list_entries = (volatile AHCI::CommandHeader*)m_command_list_region->vaddr().as_ptr();
-    command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()].paddr().get();
+    command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()]->paddr().get();
     command_list_entries[unused_command_header.value()].ctbau = 0;
     command_list_entries[unused_command_header.value()].prdbc = 0;
     command_list_entries[unused_command_header.value()].prdtl = 0;

@@ -221,7 +221,7 @@ void AHCIPort::eject()
     // handshake error bit in PxSERR register if CFL is incorrect.
     command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P | AHCI::CommandHeaderAttributes::C | AHCI::CommandHeaderAttributes::A;

-    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
+    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
     auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
     memset(const_cast<u8*>(command_table.command_fis), 0, 64);
     auto& fis = *(volatile FIS::HostToDevice::Register*)command_table.command_fis;

@@ -497,7 +497,7 @@ Optional<AsyncDeviceRequest::RequestResult> AHCIPort::prepare_and_set_scatter_li
     VERIFY(m_lock.is_locked());
     VERIFY(request.block_count() > 0);

-    NonnullRefPtrVector<Memory::PhysicalPage> allocated_dma_regions;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> allocated_dma_regions;
     for (size_t index = 0; index < calculate_descriptors_count(request.block_count()); index++) {
         allocated_dma_regions.append(m_dma_buffers.at(index));
     }

@@ -578,7 +578,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
     auto unused_command_header = try_to_find_unused_command_header();
     VERIFY(unused_command_header.has_value());
     auto* command_list_entries = (volatile AHCI::CommandHeader*)m_command_list_region->vaddr().as_ptr();
-    command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()].paddr().get();
+    command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()]->paddr().get();
     command_list_entries[unused_command_header.value()].ctbau = 0;
     command_list_entries[unused_command_header.value()].prdbc = 0;
     command_list_entries[unused_command_header.value()].prdtl = m_current_scatter_list->scatters_count();

@@ -591,7 +591,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64

     dbgln_if(AHCI_DEBUG, "AHCI Port {}: CLE: ctba={:#08x}, ctbau={:#08x}, prdbc={:#08x}, prdtl={:#04x}, attributes={:#04x}", representative_port_index(), (u32)command_list_entries[unused_command_header.value()].ctba, (u32)command_list_entries[unused_command_header.value()].ctbau, (u32)command_list_entries[unused_command_header.value()].prdbc, (u16)command_list_entries[unused_command_header.value()].prdtl, (u16)command_list_entries[unused_command_header.value()].attributes);

-    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
+    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
     auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();

     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Allocated command table at {}", representative_port_index(), command_table_region->vaddr());

@@ -652,7 +652,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
     mark_command_header_ready_to_process(unused_command_header.value());
     full_memory_barrier();

-    dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {} @ {}, ended", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ? "write" : "read", lba, block_count, m_dma_buffers[0].paddr());
+    dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {} @ {}, ended", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ? "write" : "read", lba, block_count, m_dma_buffers[0]->paddr());
     return true;
 }

@@ -670,7 +670,7 @@ bool AHCIPort::identify_device()
     auto unused_command_header = try_to_find_unused_command_header();
     VERIFY(unused_command_header.has_value());
     auto* command_list_entries = (volatile AHCI::CommandHeader*)m_command_list_region->vaddr().as_ptr();
-    command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()].paddr().get();
+    command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()]->paddr().get();
     command_list_entries[unused_command_header.value()].ctbau = 0;
     command_list_entries[unused_command_header.value()].prdbc = 512;
     command_list_entries[unused_command_header.value()].prdtl = 1;

@@ -679,7 +679,7 @@ bool AHCIPort::identify_device()
     // QEMU doesn't care if we don't set the correct CFL field in this register, real hardware will set an handshake error bit in PxSERR register.
     command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P;

-    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value();
+    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value();
     auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
     memset(const_cast<u8*>(command_table.command_fis), 0, 64);
     command_table.descriptors[0].base_high = 0;

@@ -112,8 +112,8 @@ private:

     mutable bool m_wait_for_completion { false };

-    NonnullRefPtrVector<Memory::PhysicalPage> m_dma_buffers;
-    NonnullRefPtrVector<Memory::PhysicalPage> m_command_table_pages;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> m_dma_buffers;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> m_command_table_pages;
     RefPtr<Memory::PhysicalPage> m_command_list_page;
     OwnPtr<Memory::Region> m_command_list_region;
     RefPtr<Memory::PhysicalPage> m_fis_receive_page;

@@ -256,9 +256,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
 {
     auto qdepth = get_admin_q_dept();
     OwnPtr<Memory::Region> cq_dma_region;
-    NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_pages;
     OwnPtr<Memory::Region> sq_dma_region;
-    NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_pages;
     auto cq_size = round_up_to_power_of_two(CQ_SIZE(qdepth), 4096);
     auto sq_size = round_up_to_power_of_two(SQ_SIZE(qdepth), 4096);
     if (!reset_controller()) {

@@ -280,8 +280,8 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
     }
     auto doorbell_regs = TRY(Memory::map_typed_writable<DoorbellRegister volatile>(PhysicalAddress(m_bar + REG_SQ0TDBL_START)));

-    m_controller_regs->acq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first().paddr().as_ptr()));
-    m_controller_regs->asq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first().paddr().as_ptr()));
+    m_controller_regs->acq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first()->paddr().as_ptr()));
+    m_controller_regs->asq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first()->paddr().as_ptr()));

     if (!start_controller()) {
         dmesgln_pci(*this, "Failed to restart the NVMe controller");

@@ -297,9 +297,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
 UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<u8> irq)
 {
     OwnPtr<Memory::Region> cq_dma_region;
-    NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_pages;
     OwnPtr<Memory::Region> sq_dma_region;
-    NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_pages;
     auto cq_size = round_up_to_power_of_two(CQ_SIZE(IO_QUEUE_SIZE), 4096);
     auto sq_size = round_up_to_power_of_two(SQ_SIZE(IO_QUEUE_SIZE), 4096);

@@ -320,7 +320,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<
     {
         NVMeSubmission sub {};
         sub.op = OP_ADMIN_CREATE_COMPLETION_QUEUE;
-        sub.create_cq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first().paddr().as_ptr()));
+        sub.create_cq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first()->paddr().as_ptr()));
         sub.create_cq.cqid = qid;
         // The queue size is 0 based
         sub.create_cq.qsize = AK::convert_between_host_and_little_endian(IO_QUEUE_SIZE - 1);

@@ -335,7 +335,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<
     {
         NVMeSubmission sub {};
         sub.op = OP_ADMIN_CREATE_SUBMISSION_QUEUE;
-        sub.create_sq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first().paddr().as_ptr()));
+        sub.create_sq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first()->paddr().as_ptr()));
         sub.create_sq.sqid = qid;
         // The queue size is 0 based
         sub.create_sq.qsize = AK::convert_between_host_and_little_endian(IO_QUEUE_SIZE - 1);

@@ -11,7 +11,7 @@

 namespace Kernel {

-UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
     : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
     , IRQHandler(irq)
 {

@@ -13,7 +13,7 @@ namespace Kernel {
 class NVMeInterruptQueue : public NVMeQueue
     , public IRQHandler {
 public:
-    NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
     void submit_sqe(NVMeSubmission& submission) override;
     virtual ~NVMeInterruptQueue() override {};

@@ -10,7 +10,7 @@
 #include <Kernel/Storage/NVMe/NVMePollQueue.h>

 namespace Kernel {
-UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
     : NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
 {
 }

@@ -12,7 +12,7 @@ namespace Kernel {

 class NVMePollQueue : public NVMeQueue {
 public:
-    NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
     void submit_sqe(NVMeSubmission& submission) override;
     virtual ~NVMePollQueue() override {};

@@ -12,7 +12,7 @@
 #include <Kernel/Storage/NVMe/NVMeQueue.h>

 namespace Kernel {
-ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
 {
     // Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
     RefPtr<Memory::PhysicalPage> rw_dma_page;

@@ -25,7 +25,7 @@ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8
     return queue;
 }

-UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
     : m_current_request(nullptr)
     , m_rw_dma_region(move(rw_dma_region))
     , m_qid(qid)

@@ -30,7 +30,7 @@ struct DoorbellRegister {
 class AsyncBlockDeviceRequest;
 class NVMeQueue : public AtomicRefCounted<NVMeQueue> {
 public:
-    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
     bool is_admin_queue() { return m_admin_queue; };
     u16 submit_sync_sqe(NVMeSubmission&);
     void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);

@@ -44,7 +44,7 @@ protected:
     {
         m_db_regs->sq_tail = m_sq_tail;
     }
-    NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+    NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);

 private:
     bool cqe_available();

@@ -71,10 +71,10 @@ private:
     u32 m_qdepth {};
     Spinlock<LockRank::Interrupts> m_sq_lock {};
     OwnPtr<Memory::Region> m_cq_dma_region;
-    NonnullRefPtrVector<Memory::PhysicalPage> m_cq_dma_page;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> m_cq_dma_page;
     Span<NVMeSubmission> m_sqe_array;
     OwnPtr<Memory::Region> m_sq_dma_region;
-    NonnullRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> m_sq_dma_page;
     Span<NVMeCompletion> m_cqe_array;
     Memory::TypedMapping<DoorbellRegister volatile> m_db_regs;
     NonnullRefPtr<Memory::PhysicalPage const> m_rw_dma_page;