Mirror of https://github.com/RGBCube/serenity, synced 2025-05-31 18:58:12 +00:00
Kernel: Move PhysicalPage classes out of the heap into an array
By moving the PhysicalPage classes out of the kernel heap into a static array, with one entry per physical page, we avoid the overhead of heap allocation and can find any page's metadata by simply indexing into the array. This also wraps each PhysicalPage in a PhysicalPageEntry, which allows a free slot to be re-used to store information about where to find the next free page.
Parent: ad5d9d648b
Commit: 87dc4c3d2c

11 changed files with 285 additions and 43 deletions
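Before the diff itself, here is the commit's core idea as a small, self-contained C++ sketch. It uses simplified stand-ins for the kernel types, not the actual SerenityOS code, and the names FreePageInfo and entry_for are invented for the illustration: every physical page gets one slot in a flat PhysicalPageEntry array, each slot is a union so a free page can carry free-list bookkeeping instead of a live PhysicalPage, and looking up the metadata for an address is plain index arithmetic.

#include <cassert>
#include <cstdint>
#include <new>

// Simplified stand-ins for the kernel types; illustration only.
using PhysicalPtr = uint64_t;
constexpr PhysicalPtr PAGE_SIZE = 4096;

struct PhysicalPage {
    uint32_t ref_count;
    bool supervisor;
};

struct FreePageInfo {
    int64_t next_free_index; // hypothetical free-list link for an unused slot
};

struct PhysicalPageEntry {
    // Each slot is either a live PhysicalPage or free-list bookkeeping,
    // so no extra memory is needed to track free pages.
    union {
        PhysicalPage physical_page;
        FreePageInfo free_info;
    };
};

// One entry per physical page, in one flat array: no heap allocation,
// and the entry for an address is found by pure index arithmetic.
PhysicalPageEntry physical_page_entries[1024];

PhysicalPageEntry& entry_for(PhysicalPtr paddr)
{
    return physical_page_entries[paddr / PAGE_SIZE];
}

int main()
{
    PhysicalPtr paddr = 5 * PAGE_SIZE;
    // Construct a PhysicalPage in place in its slot, as PhysicalPage::create() now does.
    new (&entry_for(paddr).physical_page) PhysicalPage { 1, false };
    assert(&entry_for(paddr) == &physical_page_entries[5]);
    assert(entry_for(paddr).physical_page.ref_count == 1);
}

Compared with heap-allocating each PhysicalPage, this removes the per-page allocation and lets the allocator reuse the same storage for free-list links, which is what the PhysicalPageEntry union in the diff below is for.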
@@ -446,7 +446,7 @@ long_mode_supported:
     addl $4096, %eax
     loop 1b

-    /* pseudo identity map the 3072-3102MB range */
+    /* pseudo identity map the 3072-3104MB range */
     movl $(512 * 16), %ecx
     movl $(boot_pd3_pts - KERNEL_BASE), %edi
     xorl %eax, %eax
@@ -15,6 +15,13 @@ typedef u64 PhysicalSize;
 class PhysicalAddress {
 public:
+    ALWAYS_INLINE static PhysicalPtr physical_page_base(PhysicalPtr page_address) { return page_address & ~(PhysicalPtr)0xfff; }
+    ALWAYS_INLINE static size_t physical_page_index(PhysicalPtr page_address)
+    {
+        auto page_index = page_address >> 12;
+        if constexpr (sizeof(size_t) < sizeof(PhysicalPtr))
+            VERIFY(!(page_index & ~(PhysicalPtr)((size_t)-1)));
+        return (size_t)(page_index);
+    }
+
     PhysicalAddress() = default;
     explicit PhysicalAddress(PhysicalPtr address)
@@ -12,3 +12,10 @@
 #define UNMAP_AFTER_INIT NEVER_INLINE __attribute__((section(".unmap_after_init")))

 #define KERNEL_BASE 0xC0000000
+#define KERNEL_PD_OFFSET 0x2000000
+#define KERNEL_PD_END 0xF1000000
+#define KERNEL_PT1024_BASE 0xFFE00000
+#define KERNEL_QUICKMAP_PT (KERNEL_PT1024_BASE + 0x6000)
+#define KERNEL_QUICKMAP_PD (KERNEL_PT1024_BASE + 0x7000)
+#define KERNEL_QUICKMAP_PER_CPU_BASE (KERNEL_PT1024_BASE + 0x8000)
+#define KERNEL_PHYSICAL_PAGES_BASE (KERNEL_BASE + KERNEL_PD_OFFSET)
@@ -59,8 +59,9 @@ bool MemoryManager::is_initialized()

 UNMAP_AFTER_INIT MemoryManager::MemoryManager()
 {
+    s_the = this;
+
     ScopedSpinLock lock(s_mm_lock);
-    m_kernel_page_directory = PageDirectory::create_kernel_page_directory();
     parse_memory_map();
     write_cr3(kernel_page_directory().cr3());
     protect_kernel_image();
@@ -192,10 +193,6 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
     auto* mmap_begin = reinterpret_cast<multiboot_memory_map_t*>(low_physical_to_virtual(multiboot_info_ptr->mmap_addr));
     auto* mmap_end = reinterpret_cast<multiboot_memory_map_t*>(low_physical_to_virtual(multiboot_info_ptr->mmap_addr) + multiboot_info_ptr->mmap_length);

-    for (auto& used_range : m_used_memory_ranges) {
-        dmesgln("MM: {} range @ {} - {}", UserMemoryRangeTypeNames[static_cast<int>(used_range.type)], used_range.start, used_range.end);
-    }
-
     for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
         dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", mmap->addr, mmap->len, mmap->type);
@@ -273,15 +270,18 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages))),
         PhysicalAddress(virtual_to_low_physical(FlatPtr(super_pages + sizeof(super_pages))))));

-    for (auto& region : m_super_physical_regions) {
+    for (auto& region : m_super_physical_regions)
         m_system_memory_info.super_physical_pages += region.finalize_capacity();
-        dmesgln("MM: Super physical region: {} - {}", region.lower(), region.upper());
+
+    for (auto& region : m_user_physical_regions)
+        m_system_memory_info.user_physical_pages += region.finalize_capacity();
+
+    register_reserved_ranges();
+    for (auto& range : m_reserved_memory_ranges) {
+        dmesgln("MM: Contiguous reserved range from {}, length is {}", range.start, range.length);
     }

-    for (auto& region : m_user_physical_regions) {
-        m_system_memory_info.user_physical_pages += region.finalize_capacity();
-        dmesgln("MM: User physical region: {} - {}", region.lower(), region.upper());
-    }
+    initialize_physical_pages();

     VERIFY(m_system_memory_info.super_physical_pages > 0);
     VERIFY(m_system_memory_info.user_physical_pages > 0);
@@ -289,10 +289,188 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
     // We start out with no committed pages
     m_system_memory_info.user_physical_pages_uncommitted = m_system_memory_info.user_physical_pages;

-    register_reserved_ranges();
-    for (auto& range : m_reserved_memory_ranges) {
-        dmesgln("MM: Contiguous reserved range from {}, length is {}", range.start, range.length);
+    for (auto& used_range : m_used_memory_ranges) {
+        dmesgln("MM: {} range @ {} - {}", UserMemoryRangeTypeNames[static_cast<int>(used_range.type)], used_range.start, used_range.end);
     }
+
+    for (auto& region : m_super_physical_regions)
+        dmesgln("MM: Super physical region: {} - {}", region.lower(), region.upper());
+
+    for (auto& region : m_user_physical_regions)
+        dmesgln("MM: User physical region: {} - {}", region.lower(), region.upper());
 }

+extern "C" PageDirectoryEntry boot_pd3[1024];
+
+UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
+{
+    // No physical memory region should be using any memory yet!
+    for (auto& region : m_user_physical_regions)
+        VERIFY(region.used() == 0);
+
+    // We assume that the physical page range is contiguous and doesn't contain huge gaps!
+    PhysicalAddress highest_physical_address;
+    for (auto& range : m_used_memory_ranges) {
+        if (range.end.get() > highest_physical_address.get())
+            highest_physical_address = range.end;
+    }
+    for (auto& region : m_physical_memory_ranges) {
+        auto range_end = PhysicalAddress(region.start).offset(region.length);
+        if (range_end.get() > highest_physical_address.get())
+            highest_physical_address = range_end;
+    }
+
+    // Calculate how many total physical pages the array will have
+    m_physical_page_entries_count = PhysicalAddress::physical_page_index(highest_physical_address.get()) + 1;
+    VERIFY(m_physical_page_entries_count != 0);
+    VERIFY(!Checked<decltype(m_physical_page_entries_count)>::multiplication_would_overflow(m_physical_page_entries_count, sizeof(PhysicalPageEntry)));
+
+    // Calculate how many bytes the array will consume
+    auto physical_page_array_size = m_physical_page_entries_count * sizeof(PhysicalPageEntry);
+    auto physical_page_array_pages = page_round_up(physical_page_array_size) / PAGE_SIZE;
+    VERIFY(physical_page_array_pages * PAGE_SIZE >= physical_page_array_size);
+
+    // Calculate how many page tables we will need to be able to map them all
+    auto needed_page_table_count = (physical_page_array_pages + 512 - 1) / 512;
+
+    auto physical_page_array_pages_and_page_tables_count = physical_page_array_pages + needed_page_table_count;
+
+    // Now that we know how much memory we need for a contiguous array of PhysicalPage instances, find a memory region that can fit it
+    RefPtr<PhysicalRegion> found_region;
+    for (auto& region : m_user_physical_regions) {
+        if (region.size() >= physical_page_array_pages_and_page_tables_count) {
+            found_region = region;
+            break;
+        }
+    }
+
+    if (!found_region) {
+        dmesgln("MM: Need {} bytes for physical page management, but no memory region is large enough!", physical_page_array_pages_and_page_tables_count);
+        VERIFY_NOT_REACHED();
+    }
+
+    VERIFY(m_system_memory_info.user_physical_pages >= physical_page_array_pages_and_page_tables_count);
+    m_system_memory_info.user_physical_pages -= physical_page_array_pages_and_page_tables_count;
+
+    if (found_region->size() == physical_page_array_pages_and_page_tables_count) {
+        // We're stealing the entire region
+        m_user_physical_regions.remove_first_matching([&](auto& region) {
+            return region == found_region.ptr();
+        });
+        m_physical_pages_region = found_region.release_nonnull();
+    } else {
+        m_physical_pages_region = found_region->take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
+    }
+    m_used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
+
+    // Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
+    m_kernel_page_directory = PageDirectory::create_kernel_page_directory();
+
+    // Allocate a virtual address range for our array
+    auto range = m_kernel_page_directory->range_allocator().allocate_anywhere(physical_page_array_pages * PAGE_SIZE);
+    if (!range.has_value()) {
+        dmesgln("MM: Could not allocate {} bytes to map physical page array!", physical_page_array_pages * PAGE_SIZE);
+        VERIFY_NOT_REACHED();
+    }
+
+    // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
+    // try to map the entire region into kernel space so we always have it
+    // We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array
+    // mapped yet so we can't create them
+    ScopedSpinLock lock(s_mm_lock);
+
+    // Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array
+    auto page_tables_base = m_physical_pages_region->lower();
+    auto physical_page_array_base = page_tables_base.offset(needed_page_table_count * PAGE_SIZE);
+    auto physical_page_array_current_page = physical_page_array_base.get();
+    auto virtual_page_array_base = range.value().base().get();
+    auto virtual_page_array_current_page = virtual_page_array_base;
+    for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
+        auto virtual_page_base_for_this_pt = virtual_page_array_current_page;
+        auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
+        auto* pt = reinterpret_cast<PageTableEntry*>(quickmap_page(pt_paddr));
+        __builtin_memset(pt, 0, PAGE_SIZE);
+        for (size_t pte_index = 0; pte_index < PAGE_SIZE / sizeof(PageTableEntry); pte_index++) {
+            auto& pte = pt[pte_index];
+            pte.set_physical_page_base(physical_page_array_current_page);
+            pte.set_user_allowed(false);
+            pte.set_writable(true);
+            if (Processor::current().has_feature(CPUFeature::NX))
+                pte.set_execute_disabled(false);
+            pte.set_global(true);
+            pte.set_present(true);
+
+            physical_page_array_current_page += PAGE_SIZE;
+            virtual_page_array_current_page += PAGE_SIZE;
+        }
+        unquickmap_page();
+
+        // Hook the page table into the kernel page directory
+        VERIFY(((virtual_page_base_for_this_pt >> 30) & 0x3) == 3);
+        PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
+
+        u32 page_directory_index = (virtual_page_base_for_this_pt >> 21) & 0x1ff;
+        auto* pd = reinterpret_cast<PageDirectoryEntry*>(quickmap_page(boot_pd3_paddr));
+        PageDirectoryEntry& pde = pd[page_directory_index];
+
+        VERIFY(!pde.is_present()); // Nothing should be using this PD yet
+
+        // We can't use ensure_pte quite yet!
+        pde.set_page_table_base(pt_paddr.get());
+        pde.set_user_allowed(false);
+        pde.set_present(true);
+        pde.set_writable(true);
+        pde.set_global(true);
+
+        unquickmap_page();
+
+        flush_tlb_local(VirtualAddress(virtual_page_base_for_this_pt));
+    }
+
+    // We now have the entire PhysicalPageEntry array mapped!
+    m_physical_page_entries = (PhysicalPageEntry*)range.value().base().get();
+    for (size_t i = 0; i < m_physical_page_entries_count; i++)
+        new (&m_physical_page_entries[i]) PageTableEntry();
+    m_physical_page_entries_free = m_physical_page_entries_count;
+
+    // Now we should be able to allocate PhysicalPage instances,
+    // so finish setting up the kernel page directory
+    m_kernel_page_directory->allocate_kernel_directory();
+
+    // Now create legit PhysicalPage objects for the page tables we created, so that
+    // we can put them into kernel_page_directory().m_page_tables
+    auto& kernel_page_tables = kernel_page_directory().m_page_tables;
+    virtual_page_array_current_page = virtual_page_array_base;
+    for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
+        VERIFY(virtual_page_array_current_page <= range.value().end().get());
+        auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
+        auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
+        auto& physical_page_entry = m_physical_page_entries[physical_page_index];
+        auto physical_page = adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(false, false));
+        auto result = kernel_page_tables.set(virtual_page_array_current_page & ~0x1fffff, move(physical_page));
+        VERIFY(result == AK::HashSetResult::InsertedNewEntry);
+
+        virtual_page_array_current_page += (PAGE_SIZE / sizeof(PhysicalPageEntry)) * PAGE_SIZE;
+    }
+
+    dmesgln("MM: Physical page entries: {} - {}", range.value().base(), range.value().end());
+}
+
+PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physical_address)
+{
+    VERIFY(m_physical_page_entries);
+    auto physical_page_entry_index = PhysicalAddress::physical_page_index(physical_address.get());
+    VERIFY(physical_page_entry_index < m_physical_page_entries_count);
+    return m_physical_page_entries[physical_page_entry_index];
+}
+
+PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical_page)
+{
+    PhysicalPageEntry const& physical_page_entry = *reinterpret_cast<PhysicalPageEntry const*>((u8 const*)&physical_page - __builtin_offsetof(PhysicalPageEntry, physical_page));
+    VERIFY(m_physical_page_entries);
+    size_t physical_page_entry_index = &physical_page_entry - m_physical_page_entries;
+    VERIFY(physical_page_entry_index < m_physical_page_entries_count);
+    return PhysicalAddress((PhysicalPtr)physical_page_entry_index * PAGE_SIZE);
+}
+
 PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
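A side note on get_physical_address() above: because every PhysicalPage now lives inside a slot of one flat PhysicalPageEntry array, its physical address can be recovered from the object's own address, by stepping back to the enclosing entry with offsetof and turning the resulting array index back into an address. A minimal userspace sketch of that round trip follows (simplified types; physical_address_of is an invented name, not the kernel's API):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Standalone illustration of the offsetof trick used by
// MemoryManager::get_physical_address(); simplified types, not the kernel's.
constexpr uint64_t PAGE_SIZE = 4096;

struct PhysicalPage {
    uint32_t ref_count;
    bool supervisor;
};

struct PhysicalPageEntry {
    // In the kernel this member sits inside a union with free-list data.
    PhysicalPage physical_page;
};

PhysicalPageEntry entries[16];

// Recover the physical address from a PhysicalPage reference alone:
// step back to the enclosing entry, then index * PAGE_SIZE is the address.
uint64_t physical_address_of(PhysicalPage const& page)
{
    auto const* entry = reinterpret_cast<PhysicalPageEntry const*>(
        reinterpret_cast<uint8_t const*>(&page) - offsetof(PhysicalPageEntry, physical_page));
    size_t index = entry - entries;
    return static_cast<uint64_t>(index) * PAGE_SIZE;
}

int main()
{
    assert(physical_address_of(entries[3].physical_page) == 3 * PAGE_SIZE);
    assert(physical_address_of(entries[0].physical_page) == 0);
}

This is why the commit can drop the m_paddr member from PhysicalPage entirely, as the PhysicalPage.h hunks further down show.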
@@ -395,7 +573,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu)
     Processor::current().set_mm_data(*mm_data);

     if (cpu == 0) {
-        s_the = new MemoryManager;
+        new MemoryManager;
         kmalloc_enable_expand();
     }
 }
@@ -751,7 +929,7 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
 {
     VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
-    auto& pte = boot_pd3_pt1023[4];
+    auto& pte = boot_pd3_pt1023[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
     auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
     if (pte.physical_page_base() != pd_paddr.get()) {
         pte.set_physical_page_base(pd_paddr.get());
@@ -761,23 +939,23 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
         // Because we must continue to hold the MM lock while we use this
         // mapping, it is sufficient to only flush on the current CPU. Other
         // CPUs trying to use this API must wait on the MM lock anyway
-        flush_tlb_local(VirtualAddress(0xffe04000));
+        flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PD));
     } else {
         // Even though we don't allow this to be called concurrently, it's
         // possible that this PD was mapped on a different CPU and we don't
         // broadcast the flush. If so, we still need to flush the TLB.
         if (mm_data.m_last_quickmap_pd != pd_paddr)
-            flush_tlb_local(VirtualAddress(0xffe04000));
+            flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PD));
     }
     mm_data.m_last_quickmap_pd = pd_paddr;
-    return (PageDirectoryEntry*)0xffe04000;
+    return (PageDirectoryEntry*)KERNEL_QUICKMAP_PD;
 }

 PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 {
     VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
-    auto& pte = boot_pd3_pt1023[0];
+    auto& pte = boot_pd3_pt1023[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
     if (pte.physical_page_base() != pt_paddr.get()) {
         pte.set_physical_page_base(pt_paddr.get());
         pte.set_present(true);
@@ -786,31 +964,31 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
         // Because we must continue to hold the MM lock while we use this
         // mapping, it is sufficient to only flush on the current CPU. Other
         // CPUs trying to use this API must wait on the MM lock anyway
-        flush_tlb_local(VirtualAddress(0xffe00000));
+        flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PT));
     } else {
         // Even though we don't allow this to be called concurrently, it's
        // possible that this PT was mapped on a different CPU and we don't
        // broadcast the flush. If so, we still need to flush the TLB.
        if (mm_data.m_last_quickmap_pt != pt_paddr)
-            flush_tlb_local(VirtualAddress(0xffe00000));
+            flush_tlb_local(VirtualAddress(KERNEL_QUICKMAP_PT));
     }
     mm_data.m_last_quickmap_pt = pt_paddr;
-    return (PageTableEntry*)0xffe00000;
+    return (PageTableEntry*)KERNEL_QUICKMAP_PT;
 }

-u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
+u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 {
     VERIFY_INTERRUPTS_DISABLED();
     auto& mm_data = get_data();
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
     ScopedSpinLock lock(s_mm_lock);

-    u32 pte_idx = 8 + Processor::id();
-    VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
+    VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
+    u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;

     auto& pte = boot_pd3_pt1023[pte_idx];
-    if (pte.physical_page_base() != physical_page.paddr().get()) {
-        pte.set_physical_page_base(physical_page.paddr().get());
+    if (pte.physical_page_base() != physical_address.get()) {
+        pte.set_physical_page_base(physical_address.get());
         pte.set_present(true);
         pte.set_writable(true);
         pte.set_user_allowed(false);
@@ -825,8 +1003,8 @@ void MemoryManager::unquickmap_page()
     ScopedSpinLock lock(s_mm_lock);
     auto& mm_data = get_data();
     VERIFY(mm_data.m_quickmap_in_use.is_locked());
-    u32 pte_idx = 8 + Processor::id();
-    VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
+    VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
+    u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
     auto& pte = boot_pd3_pt1023[pte_idx];
     pte.clear();
     flush_tlb_local(vaddr);
@@ -52,12 +52,14 @@ enum class UsedMemoryRangeType {
     LowMemory = 0,
     Kernel,
     BootModule,
+    PhysicalPages,
 };

 static constexpr StringView UserMemoryRangeTypeNames[] {
     "Low memory",
     "Kernel",
     "Boot module",
+    "Physical Pages"
 };

 struct UsedMemoryRange {
@@ -195,10 +197,14 @@ public:
     const Vector<UsedMemoryRange>& used_memory_ranges() { return m_used_memory_ranges; }
     bool is_allowed_to_mmap_to_userspace(PhysicalAddress, const Range&) const;

+    PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
+    PhysicalAddress get_physical_address(PhysicalPage const&);
+
 private:
     MemoryManager();
     ~MemoryManager();

+    void initialize_physical_pages();
     void register_reserved_ranges();

     void register_vmobject(VMObject&);
@@ -216,7 +222,12 @@ private:
     static Region* find_region_from_vaddr(VirtualAddress);

     RefPtr<PhysicalPage> find_free_user_physical_page(bool);
-    u8* quickmap_page(PhysicalPage&);
+
+    ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
+    {
+        return quickmap_page(page.paddr());
+    }
+    u8* quickmap_page(PhysicalAddress const&);
     void unquickmap_page();

     PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
@@ -235,6 +246,10 @@ private:

     NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
     NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;
+    RefPtr<PhysicalRegion> m_physical_pages_region;
+    PhysicalPageEntry* m_physical_page_entries { nullptr };
+    size_t m_physical_page_entries_free { 0 };
+    size_t m_physical_page_entries_count { 0 };

     Region::List m_user_regions;
     Region::List m_kernel_regions;
@@ -37,9 +37,12 @@ extern "C" PageDirectoryEntry boot_pd3[1024];

 UNMAP_AFTER_INIT PageDirectory::PageDirectory()
 {
-    m_range_allocator.initialize_with_range(VirtualAddress(KERNEL_BASE + 0x02000000), 0x2f000000);
+    m_range_allocator.initialize_with_range(VirtualAddress(KERNEL_BASE + KERNEL_PD_OFFSET), KERNEL_PD_END - (KERNEL_BASE + KERNEL_PD_OFFSET));
     m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
+}

+UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
+{
     // Adopt the page tables already set up by boot.S
 #if ARCH(X86_64)
     PhysicalAddress boot_pml4t_paddr(virtual_to_low_physical((FlatPtr)boot_pml4t));
@@ -32,6 +32,8 @@ public:

     ~PageDirectory();

+    void allocate_kernel_directory();
+
     FlatPtr cr3() const
     {
 #if ARCH(X86_64)
@@ -12,16 +12,21 @@ namespace Kernel {

 NonnullRefPtr<PhysicalPage> PhysicalPage::create(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist)
 {
-    return adopt_ref(*new PhysicalPage(paddr, supervisor, may_return_to_freelist));
+    auto& physical_page_entry = MM.get_physical_page_entry(paddr);
+    return adopt_ref(*new (&physical_page_entry.physical_page) PhysicalPage(supervisor, may_return_to_freelist));
 }

-PhysicalPage::PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist)
+PhysicalPage::PhysicalPage(bool supervisor, bool may_return_to_freelist)
     : m_may_return_to_freelist(may_return_to_freelist)
     , m_supervisor(supervisor)
-    , m_paddr(paddr)
 {
 }

+PhysicalAddress PhysicalPage::paddr() const
+{
+    return MM.get_physical_address(*this);
+}
+
 void PhysicalPage::return_to_freelist() const
 {
     VERIFY((paddr().get() & ~PAGE_MASK) == 0);
@@ -18,11 +18,8 @@ class PhysicalPage {
     friend class PageDirectory;
     friend class VMObject;

-    MAKE_SLAB_ALLOCATED(PhysicalPage);
     AK_MAKE_NONMOVABLE(PhysicalPage);

 public:
-    PhysicalAddress paddr() const { return m_paddr; }
+    PhysicalAddress paddr() const;

     void ref()
     {
@@ -34,7 +31,7 @@ public:
         if (m_ref_count.fetch_sub(1, AK::memory_order_acq_rel) == 1) {
             if (m_may_return_to_freelist)
                 return_to_freelist();
-            delete this;
+            this->~PhysicalPage(); // delete in place
         }
     }

@@ -46,7 +43,7 @@ public:
     bool is_lazy_committed_page() const;

 private:
-    PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
+    PhysicalPage(bool supervisor, bool may_return_to_freelist = true);
     ~PhysicalPage() = default;

     void return_to_freelist() const;
@@ -54,7 +51,14 @@ private:
     Atomic<u32> m_ref_count { 1 };
     bool m_may_return_to_freelist { true };
     bool m_supervisor { false };
-    PhysicalAddress m_paddr;
 };

+struct PhysicalPageEntry {
+    // This structure either holds a valid PhysicalPage
+    // or a PhysicalAllocator's free list information!
+    union {
+        PhysicalPage physical_page;
+    };
+};
+
 }
@@ -44,6 +44,25 @@ unsigned PhysicalRegion::finalize_capacity()
     return size();
 }

+NonnullRefPtr<PhysicalRegion> PhysicalRegion::take_pages_from_beginning(unsigned page_count)
+{
+    VERIFY(m_used == 0);
+    VERIFY(page_count > 0);
+    VERIFY(page_count < m_pages);
+    auto taken_lower = m_lower;
+    auto taken_upper = taken_lower.offset((PhysicalPtr)page_count * PAGE_SIZE);
+    m_lower = m_lower.offset((PhysicalPtr)page_count * PAGE_SIZE);
+
+    // TODO: find a more elegant way to re-init the existing region
+    m_pages = 0;
+    m_bitmap = {}; // FIXME: Kind of wasteful
+    finalize_capacity();
+
+    auto taken_region = create(taken_lower, taken_upper);
+    taken_region->finalize_capacity();
+    return taken_region;
+}
+
 NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment)
 {
     VERIFY(m_pages);
@@ -31,6 +31,8 @@ public:
     unsigned free() const { return m_pages - m_used + m_recently_returned.size(); }
     bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }

+    NonnullRefPtr<PhysicalRegion> take_pages_from_beginning(unsigned);
+
     RefPtr<PhysicalPage> take_free_page(bool supervisor);
     NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment = PAGE_SIZE);
     void return_page(const PhysicalPage& page);