
Everywhere: Stop using NonnullRefPtrVector

This class had slightly confusing semantics and the added weirdness
doesn't seem worth it just so we can say "." instead of "->" when
iterating over a vector of NNRPs.

This patch replaces NonnullRefPtrVector<T> with Vector<NNRP<T>>.
Andreas Kling 2023-03-06 14:17:01 +01:00
parent 104be6c8ac
commit 8a48246ed1
168 changed files with 1280 additions and 1280 deletions
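
The call-site change this diff makes everywhere is easiest to see in isolation. Below is a minimal sketch (not part of the commit; Widget and id() are hypothetical placeholders) of iterating the new vector type, assuming the usual AK headers:

#include <AK/NonnullRefPtr.h>
#include <AK/RefCounted.h>
#include <AK/Vector.h>

// Hypothetical element type, for illustration only.
class Widget : public RefCounted<Widget> {
public:
    int id() const { return 1; }
};

int sum_ids(Vector<NonnullRefPtr<Widget>> const& widgets)
{
    int sum = 0;
    for (auto const& widget : widgets) {
        // NonnullRefPtrVector<Widget>'s iterator dereferenced implicitly, so
        // call sites wrote `widget.id()`. With Vector<NonnullRefPtr<Widget>>,
        // each element is a smart pointer, hence `widget->id()`.
        sum += widget->id();
    }
    return sum;
}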

View file

@@ -183,7 +183,7 @@ ErrorOr<void> Access::fast_enumerate(Function<void(DeviceIdentifier const&)>& ca
{
// Note: We hold the m_access_lock for a brief moment just to ensure we get
// a complete Vector in case someone wants to mutate it.
- NonnullRefPtrVector<DeviceIdentifier> device_identifiers;
+ Vector<NonnullRefPtr<DeviceIdentifier>> device_identifiers;
{
SpinlockLocker locker(m_access_lock);
VERIFY(!m_device_identifiers.is_empty());
@@ -198,10 +198,11 @@ ErrorOr<void> Access::fast_enumerate(Function<void(DeviceIdentifier const&)>& ca
DeviceIdentifier const& Access::get_device_identifier(Address address) const
{
for (auto& device_identifier : m_device_identifiers) {
- if (device_identifier.address().domain() == address.domain()
- && device_identifier.address().bus() == address.bus()
- && device_identifier.address().device() == address.device()
- && device_identifier.address().function() == address.function()) {
+ auto device_address = device_identifier->address();
+ if (device_address.domain() == address.domain()
+ && device_address.bus() == address.bus()
+ && device_address.device() == address.device()
+ && device_address.function() == address.function()) {
return device_identifier;
}
}

View file

@@ -62,6 +62,6 @@ private:
mutable Spinlock<LockRank::None> m_scan_lock {};
HashMap<u32, NonnullOwnPtr<PCI::HostController>> m_host_controllers;
- NonnullRefPtrVector<DeviceIdentifier> m_device_identifiers;
+ Vector<NonnullRefPtr<DeviceIdentifier>> m_device_identifiers;
};
}

View file

@@ -785,18 +785,18 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S
return allocate_dma_buffer_page(name, access, dma_buffer_page);
}
- ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
+ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalPage>>& dma_buffer_pages)
{
VERIFY(!(size % PAGE_SIZE));
dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
// Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behaviour by default)
- return allocate_kernel_region(dma_buffer_pages.first().paddr(), size, name, access, Region::Cacheable::No);
+ return allocate_kernel_region(dma_buffer_pages.first()->paddr(), size, name, access, Region::Cacheable::No);
}
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
{
VERIFY(!(size % PAGE_SIZE));
- NonnullRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> dma_buffer_pages;
return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages);
}
@@ -1010,12 +1010,12 @@ ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(Shoul
});
}
- ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
+ ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
{
VERIFY(!(size % PAGE_SIZE));
size_t page_count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
- auto physical_pages = TRY(m_global_data.with([&](auto& global_data) -> ErrorOr<NonnullRefPtrVector<PhysicalPage>> {
+ auto physical_pages = TRY(m_global_data.with([&](auto& global_data) -> ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> {
// We need to make sure we don't touch pages that we have committed to
if (global_data.system_memory_info.physical_pages_uncommitted < page_count)
return ENOMEM;
@@ -1033,7 +1033,7 @@ ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_ph
}));
{
- auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * page_count, {}, Region::Access::Read | Region::Access::Write));
+ auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0]->paddr(), PAGE_SIZE * page_count, {}, Region::Access::Read | Region::Access::Write));
memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
}
return physical_pages;

View file

@@ -166,13 +166,13 @@ public:
NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
- ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
+ ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> allocate_contiguous_physical_pages(size_t size);
void deallocate_physical_page(PhysicalAddress);
ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access);
- ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
+ ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalPage>>& dma_buffer_pages);
ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);

View file

@@ -75,7 +75,7 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(size_t page
return try_create(taken_lower, taken_upper);
}
- NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
+ Vector<NonnullRefPtr<PhysicalPage>> PhysicalRegion::take_contiguous_free_pages(size_t count)
{
auto rounded_page_count = next_power_of_two(count);
auto order = count_trailing_zeroes(rounded_page_count);
@@ -95,7 +95,7 @@ NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(siz
if (!page_base.has_value())
return {};
- NonnullRefPtrVector<PhysicalPage> physical_pages;
+ Vector<NonnullRefPtr<PhysicalPage>> physical_pages;
physical_pages.ensure_capacity(count);
for (size_t i = 0; i < count; ++i)

View file

@@ -34,7 +34,7 @@ public:
OwnPtr<PhysicalRegion> try_take_pages_from_beginning(size_t);
RefPtr<PhysicalPage> take_free_page();
- NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
+ Vector<NonnullRefPtr<PhysicalPage>> take_contiguous_free_pages(size_t count);
void return_page(PhysicalAddress);
private:

View file

@@ -210,7 +210,7 @@ void AHCIPort::eject()
auto unused_command_header = try_to_find_unused_command_header();
VERIFY(unused_command_header.has_value());
auto* command_list_entries = (volatile AHCI::CommandHeader*)m_command_list_region->vaddr().as_ptr();
- command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()].paddr().get();
+ command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()]->paddr().get();
command_list_entries[unused_command_header.value()].ctbau = 0;
command_list_entries[unused_command_header.value()].prdbc = 0;
command_list_entries[unused_command_header.value()].prdtl = 0;
@@ -221,7 +221,7 @@ void AHCIPort::eject()
// handshake error bit in PxSERR register if CFL is incorrect.
command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P | AHCI::CommandHeaderAttributes::C | AHCI::CommandHeaderAttributes::A;
- auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
+ auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
memset(const_cast<u8*>(command_table.command_fis), 0, 64);
auto& fis = *(volatile FIS::HostToDevice::Register*)command_table.command_fis;
@@ -497,7 +497,7 @@ Optional<AsyncDeviceRequest::RequestResult> AHCIPort::prepare_and_set_scatter_li
VERIFY(m_lock.is_locked());
VERIFY(request.block_count() > 0);
- NonnullRefPtrVector<Memory::PhysicalPage> allocated_dma_regions;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> allocated_dma_regions;
for (size_t index = 0; index < calculate_descriptors_count(request.block_count()); index++) {
allocated_dma_regions.append(m_dma_buffers.at(index));
}
@@ -578,7 +578,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
auto unused_command_header = try_to_find_unused_command_header();
VERIFY(unused_command_header.has_value());
auto* command_list_entries = (volatile AHCI::CommandHeader*)m_command_list_region->vaddr().as_ptr();
- command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()].paddr().get();
+ command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()]->paddr().get();
command_list_entries[unused_command_header.value()].ctbau = 0;
command_list_entries[unused_command_header.value()].prdbc = 0;
command_list_entries[unused_command_header.value()].prdtl = m_current_scatter_list->scatters_count();
@@ -591,7 +591,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
dbgln_if(AHCI_DEBUG, "AHCI Port {}: CLE: ctba={:#08x}, ctbau={:#08x}, prdbc={:#08x}, prdtl={:#04x}, attributes={:#04x}", representative_port_index(), (u32)command_list_entries[unused_command_header.value()].ctba, (u32)command_list_entries[unused_command_header.value()].ctbau, (u32)command_list_entries[unused_command_header.value()].prdbc, (u16)command_list_entries[unused_command_header.value()].prdtl, (u16)command_list_entries[unused_command_header.value()].attributes);
- auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
+ auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Allocated command table at {}", representative_port_index(), command_table_region->vaddr());
@@ -652,7 +652,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
mark_command_header_ready_to_process(unused_command_header.value());
full_memory_barrier();
- dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {} @ {}, ended", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ? "write" : "read", lba, block_count, m_dma_buffers[0].paddr());
+ dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {} @ {}, ended", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ? "write" : "read", lba, block_count, m_dma_buffers[0]->paddr());
return true;
}
@@ -670,7 +670,7 @@ bool AHCIPort::identify_device()
auto unused_command_header = try_to_find_unused_command_header();
VERIFY(unused_command_header.has_value());
auto* command_list_entries = (volatile AHCI::CommandHeader*)m_command_list_region->vaddr().as_ptr();
- command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()].paddr().get();
+ command_list_entries[unused_command_header.value()].ctba = m_command_table_pages[unused_command_header.value()]->paddr().get();
command_list_entries[unused_command_header.value()].ctbau = 0;
command_list_entries[unused_command_header.value()].prdbc = 512;
command_list_entries[unused_command_header.value()].prdtl = 1;
@@ -679,7 +679,7 @@ bool AHCIPort::identify_device()
// QEMU doesn't care if we don't set the correct CFL field in this register, real hardware will set an handshake error bit in PxSERR register.
command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P;
- auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value();
+ auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value();
auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
memset(const_cast<u8*>(command_table.command_fis), 0, 64);
command_table.descriptors[0].base_high = 0;

View file

@@ -112,8 +112,8 @@ private:
mutable bool m_wait_for_completion { false };
- NonnullRefPtrVector<Memory::PhysicalPage> m_dma_buffers;
- NonnullRefPtrVector<Memory::PhysicalPage> m_command_table_pages;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> m_dma_buffers;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> m_command_table_pages;
RefPtr<Memory::PhysicalPage> m_command_list_page;
OwnPtr<Memory::Region> m_command_list_region;
RefPtr<Memory::PhysicalPage> m_fis_receive_page;

View file

@@ -256,9 +256,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
{
auto qdepth = get_admin_q_dept();
OwnPtr<Memory::Region> cq_dma_region;
- NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_pages;
OwnPtr<Memory::Region> sq_dma_region;
- NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_pages;
auto cq_size = round_up_to_power_of_two(CQ_SIZE(qdepth), 4096);
auto sq_size = round_up_to_power_of_two(SQ_SIZE(qdepth), 4096);
if (!reset_controller()) {
@@ -280,8 +280,8 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
}
auto doorbell_regs = TRY(Memory::map_typed_writable<DoorbellRegister volatile>(PhysicalAddress(m_bar + REG_SQ0TDBL_START)));
- m_controller_regs->acq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first().paddr().as_ptr()));
- m_controller_regs->asq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first().paddr().as_ptr()));
+ m_controller_regs->acq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first()->paddr().as_ptr()));
+ m_controller_regs->asq = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first()->paddr().as_ptr()));
if (!start_controller()) {
dmesgln_pci(*this, "Failed to restart the NVMe controller");
@@ -297,9 +297,9 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_admin_queue(Optional<u8> i
UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<u8> irq)
{
OwnPtr<Memory::Region> cq_dma_region;
- NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_pages;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_pages;
OwnPtr<Memory::Region> sq_dma_region;
- NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_pages;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_pages;
auto cq_size = round_up_to_power_of_two(CQ_SIZE(IO_QUEUE_SIZE), 4096);
auto sq_size = round_up_to_power_of_two(SQ_SIZE(IO_QUEUE_SIZE), 4096);
@@ -320,7 +320,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<
{
NVMeSubmission sub {};
sub.op = OP_ADMIN_CREATE_COMPLETION_QUEUE;
- sub.create_cq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first().paddr().as_ptr()));
+ sub.create_cq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(cq_dma_pages.first()->paddr().as_ptr()));
sub.create_cq.cqid = qid;
// The queue size is 0 based
sub.create_cq.qsize = AK::convert_between_host_and_little_endian(IO_QUEUE_SIZE - 1);
@@ -335,7 +335,7 @@ UNMAP_AFTER_INIT ErrorOr<void> NVMeController::create_io_queue(u8 qid, Optional<
{
NVMeSubmission sub {};
sub.op = OP_ADMIN_CREATE_SUBMISSION_QUEUE;
- sub.create_sq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first().paddr().as_ptr()));
+ sub.create_sq.prp1 = reinterpret_cast<u64>(AK::convert_between_host_and_little_endian(sq_dma_pages.first()->paddr().as_ptr()));
sub.create_sq.sqid = qid;
// The queue size is 0 based
sub.create_sq.qsize = AK::convert_between_host_and_little_endian(IO_QUEUE_SIZE - 1);

View file

@@ -11,7 +11,7 @@
namespace Kernel {
- UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+ UNMAP_AFTER_INIT NVMeInterruptQueue::NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
: NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
, IRQHandler(irq)
{

View file

@@ -13,7 +13,7 @@ namespace Kernel {
class NVMeInterruptQueue : public NVMeQueue
, public IRQHandler {
public:
- NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+ NVMeInterruptQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u8 irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
void submit_sqe(NVMeSubmission& submission) override;
virtual ~NVMeInterruptQueue() override {};

View file

@@ -10,7 +10,7 @@
#include <Kernel/Storage/NVMe/NVMePollQueue.h>
namespace Kernel {
- UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+ UNMAP_AFTER_INIT NVMePollQueue::NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
: NVMeQueue(move(rw_dma_region), rw_dma_page, qid, q_depth, move(cq_dma_region), cq_dma_page, move(sq_dma_region), sq_dma_page, move(db_regs))
{
}

View file

@@ -12,7 +12,7 @@ namespace Kernel {
class NVMePollQueue : public NVMeQueue {
public:
- NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+ NVMePollQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
void submit_sqe(NVMeSubmission& submission) override;
virtual ~NVMePollQueue() override {};

View file

@@ -12,7 +12,7 @@
#include <Kernel/Storage/NVMe/NVMeQueue.h>
namespace Kernel {
- ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
{
// Note: Allocate DMA region for RW operation. For now the requests don't exceed more than 4096 bytes (Storage device takes care of it)
RefPtr<Memory::PhysicalPage> rw_dma_page;
@@ -25,7 +25,7 @@ ErrorOr<NonnullLockRefPtr<NVMeQueue>> NVMeQueue::try_create(u16 qid, Optional<u8
return queue;
}
- UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
+ UNMAP_AFTER_INIT NVMeQueue::NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs)
: m_current_request(nullptr)
, m_rw_dma_region(move(rw_dma_region))
, m_qid(qid)

View file

@@ -30,7 +30,7 @@ struct DoorbellRegister {
class AsyncBlockDeviceRequest;
class NVMeQueue : public AtomicRefCounted<NVMeQueue> {
public:
- static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+ static ErrorOr<NonnullLockRefPtr<NVMeQueue>> try_create(u16 qid, Optional<u8> irq, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
bool is_admin_queue() { return m_admin_queue; };
u16 submit_sync_sqe(NVMeSubmission&);
void read(AsyncBlockDeviceRequest& request, u16 nsid, u64 index, u32 count);
@@ -44,7 +44,7 @@ protected:
{
m_db_regs->sq_tail = m_sq_tail;
}
- NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, NonnullRefPtrVector<Memory::PhysicalPage> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
+ NVMeQueue(NonnullOwnPtr<Memory::Region> rw_dma_region, Memory::PhysicalPage const& rw_dma_page, u16 qid, u32 q_depth, OwnPtr<Memory::Region> cq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> cq_dma_page, OwnPtr<Memory::Region> sq_dma_region, Vector<NonnullRefPtr<Memory::PhysicalPage>> sq_dma_page, Memory::TypedMapping<DoorbellRegister volatile> db_regs);
private:
bool cqe_available();
@@ -71,10 +71,10 @@ private:
u32 m_qdepth {};
Spinlock<LockRank::Interrupts> m_sq_lock {};
OwnPtr<Memory::Region> m_cq_dma_region;
- NonnullRefPtrVector<Memory::PhysicalPage> m_cq_dma_page;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> m_cq_dma_page;
Span<NVMeSubmission> m_sqe_array;
OwnPtr<Memory::Region> m_sq_dma_region;
- NonnullRefPtrVector<Memory::PhysicalPage> m_sq_dma_page;
+ Vector<NonnullRefPtr<Memory::PhysicalPage>> m_sq_dma_page;
Span<NVMeCompletion> m_cqe_array;
Memory::TypedMapping<DoorbellRegister volatile> m_db_regs;
NonnullRefPtr<Memory::PhysicalPage const> m_rw_dma_page;