Kernel: Remove API for requesting physical allocation alignment
Nobody was using this API to request anything above `PAGE_SIZE` alignment, so let's get rid of it for now. We can reimplement it if we end up needing it. Note also that the requested alignment wasn't actually honored anywhere (PhysicalRegion simply ignored it).
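For context, here is a minimal caller-side sketch (hypothetical driver code, not part of this diff) of the API after the change; the size, region name, and access flags below are made up:

    // Hypothetical caller, for illustration only. With the alignment parameter
    // gone, callers just ask for a size; the result is still PAGE_SIZE-aligned.
    auto dma_region = MM.allocate_contiguous_kernel_region(
        4 * PAGE_SIZE,                                  // size must be a multiple of PAGE_SIZE
        "Example DMA Buffer",                           // made-up region name
        Region::Access::Read | Region::Access::Write);
    if (!dma_region) {
        // Allocation can fail; the returned OwnPtr<Region> must be null-checked.
    }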
parent: ba87571366
commit: be90e51355

6 changed files with 11 additions and 14 deletions
@@ -10,9 +10,9 @@
 namespace Kernel {

-RefPtr<ContiguousVMObject> ContiguousVMObject::try_create_with_size(size_t size, size_t physical_alignment)
+RefPtr<ContiguousVMObject> ContiguousVMObject::try_create_with_size(size_t size)
 {
-    auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size, physical_alignment);
+    auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
     if (contiguous_physical_pages.is_empty())
         return {};
     return adopt_ref_if_nonnull(new (nothrow) ContiguousVMObject(size, contiguous_physical_pages));
 }
@@ -15,7 +15,7 @@ class ContiguousVMObject final : public VMObject {
 public:
     virtual ~ContiguousVMObject() override;

-    static RefPtr<ContiguousVMObject> try_create_with_size(size_t, size_t physical_alignment = PAGE_SIZE);
+    static RefPtr<ContiguousVMObject> try_create_with_size(size_t);

 private:
     explicit ContiguousVMObject(size_t, NonnullRefPtrVector<PhysicalPage>&);
@@ -629,14 +629,14 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return region->handle_fault(fault, lock);
 }

-OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, size_t physical_alignment, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = ContiguousVMObject::try_create_with_size(size, physical_alignment);
+    auto vmobject = ContiguousVMObject::try_create_with_size(size);
     if (!vmobject) {
         kernel_page_directory().range_allocator().deallocate(range.value());
         return {};
@@ -834,7 +834,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
     return page;
 }

-NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment)
+NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
 {
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
@@ -842,7 +842,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
     NonnullRefPtrVector<PhysicalPage> physical_pages;

     for (auto& region : m_super_physical_regions) {
-        physical_pages = region.take_contiguous_free_pages(count, physical_alignment);
+        physical_pages = region.take_contiguous_free_pages(count);
         if (!physical_pages.is_empty())
             continue;
     }
@@ -145,10 +145,10 @@ public:
     NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();
-    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment = PAGE_SIZE);
+    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);

-    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
@@ -91,11 +91,8 @@ OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(unsigned pa
     return taken_region;
 }

-NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, size_t physical_alignment)
+NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count)
 {
-    // FIXME: Care about alignment.
-    (void)physical_alignment;
-
     auto rounded_page_count = next_power_of_two(count);
     auto order = __builtin_ctz(rounded_page_count);

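For readers unfamiliar with the order computation in the hunk above, here is a standalone sketch (illustrative values, not kernel code) of how a page count maps to a buddy-allocator order by rounding up to a power of two and counting trailing zeros:

    #include <cstdio>

    // Local stand-in for AK's next_power_of_two(): smallest power of two >= value.
    static unsigned next_power_of_two(unsigned value)
    {
        unsigned result = 1;
        while (result < value)
            result <<= 1;
        return result;
    }

    int main()
    {
        unsigned count = 5;                                      // example: request 5 pages
        unsigned rounded_page_count = next_power_of_two(count);  // -> 8
        unsigned order = __builtin_ctz(rounded_page_count);      // ctz(8) == 3, i.e. order 3
        printf("count=%u rounded=%u order=%u\n", count, rounded_page_count, order);
        return 0;
    }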
@@ -39,7 +39,7 @@ public:
     OwnPtr<PhysicalRegion> try_take_pages_from_beginning(unsigned);

     RefPtr<PhysicalPage> take_free_page();
-    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
+    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count);
     void return_page(PhysicalAddress);

 private: