
Kernel: Use PAE to allow accessing all physical memory beyond 4GB

We already use PAE for the NX bit, but this changes the PhysicalAddress
structure so that it can hold 64-bit physical addresses. This allows us
to use all of the available physical memory.
Authored by Tom on 2021-07-06 21:35:15 -06:00; committed by Andreas Kling
parent 658b41a06c
commit ad5d9d648b
7 changed files with 87 additions and 86 deletions
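For orientation, here is a minimal sketch of the idea, not the actual SerenityOS definitions: physical addresses and sizes get their own 64-bit types (PhysicalPtr and PhysicalSize, as used in the diffs below), so a PhysicalAddress can describe memory above 4 GiB even while virtual pointers (FlatPtr) stay 32-bit on i686.

// Hedged sketch using standard C++ types; the real kernel defines these
// in its own headers.
#include <cstdint>

using PhysicalPtr = std::uint64_t;  // wide enough for PAE physical addresses
using PhysicalSize = std::uint64_t;

class PhysicalAddress {
public:
    PhysicalAddress() = default;
    explicit PhysicalAddress(PhysicalPtr address)
        : m_address(address)
    {
    }

    PhysicalPtr get() const { return m_address; }
    PhysicalAddress offset(PhysicalSize o) const { return PhysicalAddress(m_address + o); }

private:
    PhysicalPtr m_address { 0 };
};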

MemoryManager.cpp

@@ -200,7 +200,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", mmap->addr, mmap->len, mmap->type);
         auto start_address = PhysicalAddress(mmap->addr);
-        auto length = static_cast<size_t>(mmap->len);
+        auto length = mmap->len;
         switch (mmap->type) {
         case (MULTIBOOT_MEMORY_AVAILABLE):
             m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
@@ -227,9 +227,6 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
             continue;
-        if ((mmap->addr + mmap->len) > 0xffffffff)
-            continue;
         // Fix up unaligned memory regions.
         auto diff = (FlatPtr)mmap->addr % PAGE_SIZE;
         if (diff != 0) {
@@ -247,7 +244,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
             continue;
         }
-        for (size_t page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
+        for (PhysicalSize page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
             auto addr = PhysicalAddress(page_base);
             // Skip used memory ranges.
@@ -277,20 +274,21 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages + sizeof(super_pages))))));
     for (auto& region : m_super_physical_regions) {
-        m_super_physical_pages += region.finalize_capacity();
+        m_system_memory_info.super_physical_pages += region.finalize_capacity();
         dmesgln("MM: Super physical region: {} - {}", region.lower(), region.upper());
     }
     for (auto& region : m_user_physical_regions) {
-        m_user_physical_pages += region.finalize_capacity();
+        m_system_memory_info.user_physical_pages += region.finalize_capacity();
         dmesgln("MM: User physical region: {} - {}", region.lower(), region.upper());
     }
-    VERIFY(m_super_physical_pages > 0);
-    VERIFY(m_user_physical_pages > 0);
+    VERIFY(m_system_memory_info.super_physical_pages > 0);
+    VERIFY(m_system_memory_info.user_physical_pages > 0);
     // We start out with no committed pages
-    m_user_physical_pages_uncommitted = m_user_physical_pages.load();
+    m_system_memory_info.user_physical_pages_uncommitted = m_system_memory_info.user_physical_pages;
     register_reserved_ranges();
     for (auto& range : m_reserved_memory_ranges) {
         dmesgln("MM: Contiguous reserved range from {}, length is {}", range.start, range.length);
@@ -534,11 +532,11 @@ bool MemoryManager::commit_user_physical_pages(size_t page_count)
 {
     VERIFY(page_count > 0);
     ScopedSpinLock lock(s_mm_lock);
-    if (m_user_physical_pages_uncommitted < page_count)
+    if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
         return false;
-    m_user_physical_pages_uncommitted -= page_count;
-    m_user_physical_pages_committed += page_count;
+    m_system_memory_info.user_physical_pages_uncommitted -= page_count;
+    m_system_memory_info.user_physical_pages_committed += page_count;
     return true;
 }
@@ -546,10 +544,10 @@ void MemoryManager::uncommit_user_physical_pages(size_t page_count)
 {
     VERIFY(page_count > 0);
     ScopedSpinLock lock(s_mm_lock);
-    VERIFY(m_user_physical_pages_committed >= page_count);
+    VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);
-    m_user_physical_pages_uncommitted += page_count;
-    m_user_physical_pages_committed -= page_count;
+    m_system_memory_info.user_physical_pages_uncommitted += page_count;
+    m_system_memory_info.user_physical_pages_committed -= page_count;
 }
 void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
@@ -560,12 +558,12 @@ void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
             continue;
         region.return_page(page);
-        --m_user_physical_pages_used;
+        --m_system_memory_info.user_physical_pages_used;
         // Always return pages to the uncommitted pool. Pages that were
         // committed and allocated are only freed upon request. Once
         // returned there is no guarantee being able to get them back.
-        ++m_user_physical_pages_uncommitted;
+        ++m_system_memory_info.user_physical_pages_uncommitted;
         return;
     }
@@ -579,18 +577,18 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
     RefPtr<PhysicalPage> page;
     if (committed) {
         // Draw from the committed pages pool. We should always have these pages available
-        VERIFY(m_user_physical_pages_committed > 0);
-        m_user_physical_pages_committed--;
+        VERIFY(m_system_memory_info.user_physical_pages_committed > 0);
+        m_system_memory_info.user_physical_pages_committed--;
     } else {
         // We need to make sure we don't touch pages that we have committed to
-        if (m_user_physical_pages_uncommitted == 0)
+        if (m_system_memory_info.user_physical_pages_uncommitted == 0)
             return {};
-        m_user_physical_pages_uncommitted--;
+        m_system_memory_info.user_physical_pages_uncommitted--;
     }
     for (auto& region : m_user_physical_regions) {
         page = region.take_free_page(false);
         if (!page.is_null()) {
-            ++m_user_physical_pages_used;
+            ++m_system_memory_info.user_physical_pages_used;
             break;
         }
     }
@@ -659,7 +657,7 @@ void MemoryManager::deallocate_supervisor_physical_page(const PhysicalPage& page
         }
         region.return_page(page);
-        --m_super_physical_pages_used;
+        --m_system_memory_info.super_physical_pages_used;
         return;
     }
@@ -692,7 +690,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
     auto cleanup_region = MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * count, "MemoryManager Allocation Sanitization", Region::Access::Read | Region::Access::Write);
     fast_u32_fill((u32*)cleanup_region->vaddr().as_ptr(), 0, (PAGE_SIZE * count) / sizeof(u32));
-    m_super_physical_pages_used += count;
+    m_system_memory_info.super_physical_pages_used += count;
     return physical_pages;
 }
@@ -718,7 +716,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
     }
     fast_u32_fill((u32*)page->paddr().offset(KERNEL_BASE).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
-    ++m_super_physical_pages_used;
+    ++m_system_memory_info.super_physical_pages_used;
     return page;
 }
@@ -755,7 +753,7 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
     auto& mm_data = get_data();
     auto& pte = boot_pd3_pt1023[4];
     auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
-    if (pte.physical_page_base() != pd_paddr.as_ptr()) {
+    if (pte.physical_page_base() != pd_paddr.get()) {
         pte.set_physical_page_base(pd_paddr.get());
         pte.set_present(true);
         pte.set_writable(true);
@@ -780,7 +778,7 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
     VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     auto& pte = boot_pd3_pt1023[0];
-    if (pte.physical_page_base() != pt_paddr.as_ptr()) {
+    if (pte.physical_page_base() != pt_paddr.get()) {
         pte.set_physical_page_base(pt_paddr.get());
         pte.set_present(true);
         pte.set_writable(true);
@@ -811,7 +809,7 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
     VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
     auto& pte = boot_pd3_pt1023[pte_idx];
-    if (pte.physical_page_base() != physical_page.paddr().as_ptr()) {
+    if (pte.physical_page_base() != physical_page.paddr().get()) {
         pte.set_physical_page_base(physical_page.paddr().get());
         pte.set_present(true);
         pte.set_writable(true);
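Among the hunks above, commit_user_physical_pages() and uncommit_user_physical_pages() are pure accounting: pages move between an uncommitted and a committed pool while a lock is held. A self-contained sketch of that pattern, using standard C++ in place of the kernel's ScopedSpinLock and types (all names here are illustrative, not the kernel's):

#include <cstdint>
#include <mutex>

using PhysicalSize = std::uint64_t;

// Illustrative stand-in for the counters kept in SystemMemoryInfo.
struct PagePools {
    PhysicalSize uncommitted { 0 };
    PhysicalSize committed { 0 };
};

class CommitTracker {
public:
    explicit CommitTracker(PhysicalSize free_pages) { m_pools.uncommitted = free_pages; }

    // Reserve page_count pages up front; fails if not enough free pages remain.
    bool commit(PhysicalSize page_count)
    {
        std::lock_guard lock(m_lock);
        if (m_pools.uncommitted < page_count)
            return false;
        m_pools.uncommitted -= page_count;
        m_pools.committed += page_count;
        return true;
    }

    // Return an unused reservation to the free pool.
    void uncommit(PhysicalSize page_count)
    {
        std::lock_guard lock(m_lock);
        m_pools.uncommitted += page_count;
        m_pools.committed -= page_count;
    }

private:
    std::mutex m_lock;
    PagePools m_pools;
};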

MemoryManager.h

@@ -68,7 +68,7 @@ struct UsedMemoryRange {
 struct ContiguousReservedMemoryRange {
     PhysicalAddress start;
-    size_t length {};
+    PhysicalSize length {};
 };
 enum class PhysicalMemoryRangeType {
@@ -83,7 +83,7 @@ enum class PhysicalMemoryRangeType {
 struct PhysicalMemoryRange {
     PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
     PhysicalAddress start;
-    size_t length {};
+    PhysicalSize length {};
 };
 #define MM Kernel::MemoryManager::the()
@@ -151,12 +151,20 @@ public:
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    unsigned user_physical_pages() const { return m_user_physical_pages; }
-    unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
-    unsigned user_physical_pages_committed() const { return m_user_physical_pages_committed; }
-    unsigned user_physical_pages_uncommitted() const { return m_user_physical_pages_uncommitted; }
-    unsigned super_physical_pages() const { return m_super_physical_pages; }
-    unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }
+    struct SystemMemoryInfo {
+        PhysicalSize user_physical_pages { 0 };
+        PhysicalSize user_physical_pages_used { 0 };
+        PhysicalSize user_physical_pages_committed { 0 };
+        PhysicalSize user_physical_pages_uncommitted { 0 };
+        PhysicalSize super_physical_pages { 0 };
+        PhysicalSize super_physical_pages_used { 0 };
+    };
+    SystemMemoryInfo get_system_memory_info()
+    {
+        ScopedSpinLock lock(s_mm_lock);
+        return m_system_memory_info;
+    }
     template<IteratorFunction<VMObject&> Callback>
     static void for_each_vmobject(Callback callback)
@@ -223,12 +231,7 @@ private:
     RefPtr<PhysicalPage> m_shared_zero_page;
     RefPtr<PhysicalPage> m_lazy_committed_page;
-    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages { 0 };
-    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_used { 0 };
-    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_committed { 0 };
-    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_user_physical_pages_uncommitted { 0 };
-    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_super_physical_pages { 0 };
-    Atomic<unsigned, AK::MemoryOrder::memory_order_relaxed> m_super_physical_pages_used { 0 };
+    SystemMemoryInfo m_system_memory_info;
     NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
     NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;
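The header hunks above replace six independent atomic counters with a single SystemMemoryInfo struct that is copied out under s_mm_lock, so callers get one consistent snapshot. A hedged usage sketch (the call site below is illustrative and not part of this diff; MM and dmesgln are the kernel's existing macro and logging helper):

// Illustrative only: read the snapshot once, then format the related
// counters together so they cannot be torn by concurrent updates.
auto info = MM.get_system_memory_info();
dmesgln("MM: {} of {} user physical pages used ({} committed, {} uncommitted)",
    info.user_physical_pages_used, info.user_physical_pages,
    info.user_physical_pages_committed, info.user_physical_pages_uncommitted);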

PhysicalRegion.cpp

@@ -76,7 +76,7 @@ Optional<unsigned> PhysicalRegion::find_one_free_page()
         // Check if we can draw one from the return queue
         if (m_recently_returned.size() > 0) {
             u8 index = get_fast_random<u8>() % m_recently_returned.size();
-            Checked<FlatPtr> local_offset = m_recently_returned[index].get();
+            Checked<PhysicalPtr> local_offset = m_recently_returned[index].get();
             local_offset -= m_lower.get();
             m_recently_returned.remove(index);
             VERIFY(!local_offset.has_overflow());
@@ -131,7 +131,7 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
     if (!free_index.has_value())
         return nullptr;
-    return PhysicalPage::create(m_lower.offset(free_index.value() * PAGE_SIZE), supervisor);
+    return PhysicalPage::create(m_lower.offset((PhysicalPtr)free_index.value() * PAGE_SIZE), supervisor);
 }
 void PhysicalRegion::free_page_at(PhysicalAddress addr)
@@ -142,10 +142,10 @@ void PhysicalRegion::free_page_at(PhysicalAddress addr)
         VERIFY_NOT_REACHED();
     }
-    Checked<FlatPtr> local_offset = addr.get();
+    Checked<PhysicalPtr> local_offset = addr.get();
     local_offset -= m_lower.get();
     VERIFY(!local_offset.has_overflow());
-    VERIFY(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));
+    VERIFY(local_offset.value() < ((PhysicalPtr)m_pages * PAGE_SIZE));
     auto page = local_offset.value() / PAGE_SIZE;
     m_bitmap.set(page, false);
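The PhysicalRegion hunks widen the Checked<> offset arithmetic from FlatPtr to PhysicalPtr so the underflow/overflow checks keep working for regions above 4 GiB. A small sketch of the same pattern, assuming the repository's AK::Checked and AK::Optional; the helper itself and its signature are illustrative, not kernel code:

#include <AK/Checked.h>
#include <AK/Optional.h>
#include <AK/Types.h>

// Illustrative helper: compute the page index of 'addr' inside a region
// starting at 'lower' that spans 'page_count' pages of 'page_size' bytes.
// All arithmetic happens in 64-bit physical space with overflow checking.
static Optional<u64> page_index_in_region(u64 addr, u64 lower, u64 page_count, u64 page_size)
{
    Checked<u64> local_offset = addr;
    local_offset -= lower; // trips has_overflow() if addr < lower
    if (local_offset.has_overflow())
        return {};
    if (local_offset.value() >= page_count * page_size)
        return {};
    return local_offset.value() / page_size;
}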