Kernel: Allow specifying a physical alignment when allocating

Some drivers may require allocating contiguous physical pages with
a specific alignment for the physical address.
Authored by Tom on 2020-12-05 22:49:24 -07:00; committed by Andreas Kling
parent d5472426ec
commit affb4ef01b
6 changed files with 29 additions and 23 deletions
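To illustrate the intent, here is a hypothetical driver-side sketch (not part of this commit) using the new physical_alignment argument of allocate_contiguous_kernel_region(); the device name, buffer size, and 64 KiB alignment are made-up values:

// Hypothetical sketch: a driver allocating a physically contiguous, 64 KiB-aligned DMA buffer.
// The alignment must be a multiple of PAGE_SIZE (asserted in PhysicalRegion below) and
// defaults to PAGE_SIZE when omitted, so existing callers are unaffected.
void ExampleDevice::initialize_dma_buffer()
{
    m_dma_region = MM.allocate_contiguous_kernel_region(
        16 * PAGE_SIZE, "ExampleDevice DMA", Region::Access::Read | Region::Access::Write, 64 * 1024);
    ASSERT(m_dma_region);
    // The first physical page backing m_dma_region is now aligned to 64 KiB.
}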

@@ -31,15 +31,15 @@
 namespace Kernel {
-NonnullRefPtr<ContiguousVMObject> ContiguousVMObject::create_with_size(size_t size)
+NonnullRefPtr<ContiguousVMObject> ContiguousVMObject::create_with_size(size_t size, size_t physical_alignment)
 {
-    return adopt(*new ContiguousVMObject(size));
+    return adopt(*new ContiguousVMObject(size, physical_alignment));
 }
-ContiguousVMObject::ContiguousVMObject(size_t size)
+ContiguousVMObject::ContiguousVMObject(size_t size, size_t physical_alignment)
     : VMObject(size)
 {
-    auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
+    auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size, physical_alignment);
     for (size_t i = 0; i < page_count(); i++) {
         physical_pages()[i] = contiguous_physical_pages[i];
         dbgln<CONTIGUOUS_VMOBJECT_DEBUG>("Contiguous page[{}]: {}", i, physical_pages()[i]->paddr());

@@ -35,10 +35,10 @@ class ContiguousVMObject final : public VMObject {
 public:
     virtual ~ContiguousVMObject() override;
-    static NonnullRefPtr<ContiguousVMObject> create_with_size(size_t);
+    static NonnullRefPtr<ContiguousVMObject> create_with_size(size_t, size_t physical_alignment = PAGE_SIZE);
 private:
-    explicit ContiguousVMObject(size_t);
+    explicit ContiguousVMObject(size_t, size_t physical_alignment);
     explicit ContiguousVMObject(const ContiguousVMObject&);
     virtual const char* class_name() const override { return "ContiguousVMObject"; }

@@ -394,14 +394,14 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return region->handle_fault(fault, lock);
 }
-OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, const StringView& name, u8 access, size_t physical_alignment, bool user_accessible, bool cacheable)
 {
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = ContiguousVMObject::create_with_size(size);
+    auto vmobject = ContiguousVMObject::create_with_size(size, physical_alignment);
     return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
 }
@@ -610,7 +610,7 @@ void MemoryManager::deallocate_supervisor_physical_page(const PhysicalPage& page
     ASSERT_NOT_REACHED();
 }
-NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
+NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment)
 {
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
@@ -618,9 +618,9 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
     NonnullRefPtrVector<PhysicalPage> physical_pages;
     for (auto& region : m_super_physical_regions) {
-        physical_pages = region.take_contiguous_free_pages((count), true);
+        physical_pages = region.take_contiguous_free_pages(count, true, physical_alignment);
         if (!physical_pages.is_empty())
-            break;
+            continue;
     }
     if (physical_pages.is_empty()) {

@@ -137,11 +137,11 @@ public:
     NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
     RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     RefPtr<PhysicalPage> allocate_supervisor_physical_page();
-    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size);
+    NonnullRefPtrVector<PhysicalPage> allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment = PAGE_SIZE);
     void deallocate_user_physical_page(const PhysicalPage&);
     void deallocate_supervisor_physical_page(const PhysicalPage&);
-    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
+    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, size_t physical_alignment = PAGE_SIZE, bool user_accessible = false, bool cacheable = true);
     OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, AllocationStrategy strategy = AllocationStrategy::Reserve, bool cacheable = true);
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
     OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
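The defaulted physical_alignment parameters above keep every existing call site source-compatible. A hypothetical direct use of the lower-level page allocator (values are illustrative, not from this commit) could look like:

// Hypothetical sketch: four contiguous supervisor pages whose first physical address
// is aligned to 4 * PAGE_SIZE (16 KiB with 4 KiB pages).
auto pages = MM.allocate_contiguous_supervisor_physical_pages(4 * PAGE_SIZE, 4 * PAGE_SIZE);
// pages[0].paddr() is a multiple of 4 * PAGE_SIZE; pages[1..3] follow it contiguously.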

@@ -65,7 +65,7 @@ unsigned PhysicalRegion::finalize_capacity()
     return size();
 }
-NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, bool supervisor)
+NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment)
 {
     ASSERT(m_pages);
     ASSERT(m_used != m_pages);
@@ -73,18 +73,19 @@ NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(siz
     NonnullRefPtrVector<PhysicalPage> physical_pages;
     physical_pages.ensure_capacity(count);
-    auto first_contiguous_page = find_contiguous_free_pages(count);
+    auto first_contiguous_page = find_contiguous_free_pages(count, physical_alignment);
     for (size_t index = 0; index < count; index++)
         physical_pages.append(PhysicalPage::create(m_lower.offset(PAGE_SIZE * (index + first_contiguous_page)), supervisor));
     return physical_pages;
 }
-unsigned PhysicalRegion::find_contiguous_free_pages(size_t count)
+unsigned PhysicalRegion::find_contiguous_free_pages(size_t count, size_t physical_alignment)
 {
     ASSERT(count != 0);
+    ASSERT(physical_alignment % PAGE_SIZE == 0);
     // search from the last page we allocated
-    auto range = find_and_allocate_contiguous_range(count);
+    auto range = find_and_allocate_contiguous_range(count, physical_alignment / PAGE_SIZE);
     ASSERT(range.has_value());
     return range.value();
 }
@@ -118,16 +119,21 @@ Optional<unsigned> PhysicalRegion::find_one_free_page()
     return page_index;
 }
-Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t count)
+Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t count, unsigned alignment)
 {
     ASSERT(count != 0);
     size_t found_pages_count = 0;
-    auto first_index = m_bitmap.find_longest_range_of_unset_bits(count, found_pages_count);
+    // TODO: Improve how we deal with alignment != 1
+    auto first_index = m_bitmap.find_longest_range_of_unset_bits(count + alignment - 1, found_pages_count);
     if (!first_index.has_value())
         return {};
     auto page = first_index.value();
-    if (count == found_pages_count) {
+    if (alignment != 1) {
+        auto lower_page = m_lower.get() / PAGE_SIZE;
+        page = ((lower_page + page + alignment - 1) & ~(alignment - 1)) - lower_page;
+    }
+    if (found_pages_count >= count) {
         m_bitmap.set_range<true>(page, count);
         m_used += count;
         m_free_hint = first_index.value() + count + 1; // Just a guess
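The alignment handling above asks the bitmap for count + alignment - 1 free pages so there is room to slide the start forward, then rounds the first absolute page number up to the requested alignment (in pages) before marking the range used. A standalone illustration of that rounding step, with made-up numbers and assuming a power-of-two alignment:

#include <cstdio>
#include <cstddef>

int main()
{
    size_t lower_page = 0x100; // region starts at physical page 0x100 (1 MiB with 4 KiB pages)
    size_t page = 3;           // bitmap search found free pages at region-relative index 3
    size_t alignment = 4;      // requested 16 KiB alignment => 4 pages

    // Absolute page 0x103 rounds up to 0x104, then converts back to a region-relative index of 4.
    size_t aligned = ((lower_page + page + alignment - 1) & ~(alignment - 1)) - lower_page;
    printf("aligned region-relative index: %zu\n", aligned); // prints 4
    return 0;
}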

@@ -52,12 +52,12 @@ public:
     bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
     RefPtr<PhysicalPage> take_free_page(bool supervisor);
-    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor);
+    NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment = PAGE_SIZE);
     void return_page(const PhysicalPage& page);
 private:
-    unsigned find_contiguous_free_pages(size_t count);
-    Optional<unsigned> find_and_allocate_contiguous_range(size_t count);
+    unsigned find_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
+    Optional<unsigned> find_and_allocate_contiguous_range(size_t count, unsigned alignment = 1);
     Optional<unsigned> find_one_free_page();
     void free_page_at(PhysicalAddress addr);