Mirror of https://github.com/RGBCube/serenity, synced 2025-07-27 07:47:35 +00:00
Everywhere: Stop using NonnullRefPtrVector
This class had slightly confusing semantics and the added weirdness doesn't seem worth it just so we can say "." instead of "->" when iterating over a vector of NNRPs. This patch replaces NonnullRefPtrVector<T> with Vector<NNRP<T>>.
parent 104be6c8ac
commit 8a48246ed1
168 changed files with 1280 additions and 1280 deletions
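The difference the commit message describes can be made concrete with a small sketch. The Thing class below is hypothetical (not part of this commit); the point is only the element-access syntax: NonnullRefPtrVector<T> auto-dereferenced its elements, while a plain Vector<NonnullRefPtr<T>> hands back the smart pointer itself.

// Minimal sketch; "Thing" is a made-up ref-counted class used only for illustration.
#include <AK/NonnullRefPtr.h>
#include <AK/RefCounted.h>
#include <AK/Vector.h>

class Thing : public RefCounted<Thing> {
public:
    void do_something() { }
};

// Before this patch (NonnullRefPtrVector<Thing> auto-dereferenced its elements):
//     for (auto& thing : things)
//         thing.do_something();

// After this patch, the same loop over Vector<NonnullRefPtr<Thing>> goes through the pointer:
void do_all(Vector<NonnullRefPtr<Thing>>& things)
{
    for (auto& thing : things)
        thing->do_something();
}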
Kernel/Memory/MemoryManager.cpp:

@@ -785,18 +785,18 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(S
     return allocate_dma_buffer_page(name, access, dma_buffer_page);
 }
 
-ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages)
+ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalPage>>& dma_buffer_pages)
 {
     VERIFY(!(size % PAGE_SIZE));
     dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
     // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behaviour by default)
-    return allocate_kernel_region(dma_buffer_pages.first().paddr(), size, name, access, Region::Cacheable::No);
+    return allocate_kernel_region(dma_buffer_pages.first()->paddr(), size, name, access, Region::Cacheable::No);
 }
 
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
 {
     VERIFY(!(size % PAGE_SIZE));
-    NonnullRefPtrVector<Memory::PhysicalPage> dma_buffer_pages;
+    Vector<NonnullRefPtr<Memory::PhysicalPage>> dma_buffer_pages;
 
     return allocate_dma_buffer_pages(size, name, access, dma_buffer_pages);
 }

@@ -1010,12 +1010,12 @@ ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(Shoul
     });
 }
 
-ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
+ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
 {
     VERIFY(!(size % PAGE_SIZE));
     size_t page_count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
 
-    auto physical_pages = TRY(m_global_data.with([&](auto& global_data) -> ErrorOr<NonnullRefPtrVector<PhysicalPage>> {
+    auto physical_pages = TRY(m_global_data.with([&](auto& global_data) -> ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> {
         // We need to make sure we don't touch pages that we have committed to
         if (global_data.system_memory_info.physical_pages_uncommitted < page_count)
             return ENOMEM;

@@ -1033,7 +1033,7 @@ ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_ph
     }));
 
     {
-        auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * page_count, {}, Region::Access::Read | Region::Access::Write));
+        auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0]->paddr(), PAGE_SIZE * page_count, {}, Region::Access::Read | Region::Access::Write));
         memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
     }
     return physical_pages;

Kernel/Memory/MemoryManager.h:

@@ -166,13 +166,13 @@ public:
 
     NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
     ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
-    ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
+    ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> allocate_contiguous_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);
 
     ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page);
     ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access);
-    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
+    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalPage>>& dma_buffer_pages);
     ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);

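For a sense of how callers see the new signatures, here is a rough usage sketch. The setup_dma_buffer() function, the buffer size, and the region name are hypothetical; only the MemoryManager API shapes are taken from the declarations above.

// Hypothetical caller; assumes the MM singleton accessor and the usual Kernel headers.
#include <Kernel/Memory/MemoryManager.h>

namespace Kernel {

ErrorOr<void> setup_dma_buffer()
{
    Vector<NonnullRefPtr<Memory::PhysicalPage>> dma_buffer_pages;
    auto region = TRY(MM.allocate_dma_buffer_pages(4 * PAGE_SIZE, "Example DMA buffer"sv,
        Memory::Region::Access::Read | Memory::Region::Access::Write, dma_buffer_pages));

    // The out-parameter now holds NonnullRefPtr elements, so members are reached through "->".
    auto first_page_address = dma_buffer_pages.first()->paddr();
    (void)first_page_address;
    (void)region;
    return {};
}

}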
Kernel/Memory/PhysicalRegion.cpp:

@@ -75,7 +75,7 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(size_t page
     return try_create(taken_lower, taken_upper);
 }
 
-NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
+Vector<NonnullRefPtr<PhysicalPage>> PhysicalRegion::take_contiguous_free_pages(size_t count)
 {
     auto rounded_page_count = next_power_of_two(count);
     auto order = count_trailing_zeroes(rounded_page_count);
 
@@ -95,7 +95,7 @@ NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(siz
     if (!page_base.has_value())
         return {};
 
-    NonnullRefPtrVector<PhysicalPage> physical_pages;
+    Vector<NonnullRefPtr<PhysicalPage>> physical_pages;
     physical_pages.ensure_capacity(count);
 
     for (size_t i = 0; i < count; ++i)

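One behavioral detail visible in the second hunk: when no suitable run of pages is found, take_contiguous_free_pages() still returns "{}", which now simply means an empty Vector. A hypothetical caller check (sketch only; "region" and "count" are assumed to exist in the surrounding code) stays straightforward:

// Sketch of a failure check after this change; not code from the commit.
auto physical_pages = region.take_contiguous_free_pages(count);
if (physical_pages.is_empty())
    return ENOMEM; // no contiguous run of "count" free pages was available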
Kernel/Memory/PhysicalRegion.h:

@@ -34,7 +34,7 @@ public:
     OwnPtr<PhysicalRegion> try_take_pages_from_beginning(size_t);
 
     RefPtr<PhysicalPage> take_free_page();
-    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
+    Vector<NonnullRefPtr<PhysicalPage>> take_contiguous_free_pages(size_t count);
     void return_page(PhysicalAddress);
 
 private: