mirror of https://github.com/RGBCube/serenity
synced 2025-05-31 18:28:12 +00:00
MM: Allocate page tables from a separate set of physical pages.
The old approach only worked because of an overpermissive accident. There's now a concept of supervisor physical pages that can be allocated. They all sit in the low 4 MB of physical memory, are identity mapped, are shared between all processes, and are accessible only to ring 0.
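As a rough illustration of the scheme described above, here is a small standalone C++ sketch of a supervisor-page free list: a fixed pool of pages carved out of a low, identity-mapped physical range, handed out on demand (for example to back page tables) and returned when released. Everything in it is invented for illustration (the SupervisorPagePool name, the pool base and size in main()); the real allocation path is the allocate_supervisor_physical_page() / m_free_supervisor_physical_pages code in the diff below, whose implementation is not shown on this page.

// Standalone model of the idea, not SerenityOS kernel code: a fixed pool of
// "supervisor" pages carved out of identity-mapped low physical memory,
// handed out from a free list and returned later.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

static constexpr uint32_t PAGE_SIZE = 4096;

struct PhysicalPage {
    uint32_t paddr { 0 };          // physical address of this page
    uint16_t retain_count { 1 };   // mirrors the unsigned short m_retain_count in the diff
    bool supervisor { false };     // mirrors the new bool m_supervisor flag
};

class SupervisorPagePool {
public:
    // Carve the pool out of a low, identity-mapped physical range.
    SupervisorPagePool(uint32_t base_paddr, uint32_t page_count)
    {
        for (uint32_t i = 0; i < page_count; ++i)
            m_free_pages.push_back({ base_paddr + i * PAGE_SIZE, 1, true });
    }

    // Hand out one supervisor page, e.g. to hold a page table.
    bool allocate(PhysicalPage& out)
    {
        if (m_free_pages.empty())
            return false;
        out = m_free_pages.back();
        m_free_pages.pop_back();
        return true;
    }

    // Return a page to the free list once it is no longer needed.
    void release(PhysicalPage page)
    {
        assert(page.supervisor);
        m_free_pages.push_back(page);
    }

    std::size_t free_count() const { return m_free_pages.size(); }

private:
    std::vector<PhysicalPage> m_free_pages;
};

int main()
{
    // Hypothetical layout: 64 supervisor pages starting at the 1 MB mark.
    SupervisorPagePool pool(0x100000, 64);
    PhysicalPage page_table;
    if (pool.allocate(page_table))
        printf("page table backed by paddr %#x, %zu supervisor pages left\n",
               (unsigned)page_table.paddr, pool.free_count());
    pool.release(page_table);
    return 0;
}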
parent a5ffa2eec7
commit 683185e4aa
5 changed files with 65 additions and 105 deletions
@@ -39,15 +39,16 @@ public:
             return_to_freelist();
     }
 
-    unsigned retain_count() const { return m_retain_count; }
+    unsigned short retain_count() const { return m_retain_count; }
 
 private:
-    explicit PhysicalPage(PhysicalAddress paddr);
+    PhysicalPage(PhysicalAddress paddr, bool supervisor);
     ~PhysicalPage() = delete;
 
     void return_to_freelist();
 
-    unsigned m_retain_count { 1 };
+    unsigned short m_retain_count { 1 };
+    bool m_supervisor { false };
     PhysicalAddress m_paddr;
 };
 
@@ -174,16 +175,13 @@ public:
 
     void populate_page_directory(PageDirectory&);
 
     byte* create_kernel_alias_for_region(Region&);
     void remove_kernel_alias_for_region(Region&, byte*);
 
     void enter_kernel_paging_scope();
     void enter_process_paging_scope(Process&);
 
     bool validate_user_read(const Process&, LinearAddress) const;
     bool validate_user_write(const Process&, LinearAddress) const;
 
     RetainPtr<PhysicalPage> allocate_physical_page();
+    RetainPtr<PhysicalPage> allocate_supervisor_physical_page();
 
     void remap_region(Process&, Region&);
 
@ -196,7 +194,6 @@ private:
|
|||
void register_region(Region&);
|
||||
void unregister_region(Region&);
|
||||
|
||||
LinearAddress allocate_linear_address_range(size_t);
|
||||
void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
|
||||
void unmap_range(PageDirectory&, LinearAddress, size_t);
|
||||
void remap_region_page(PageDirectory&, Region&, unsigned page_index_in_region, bool user_allowed);
|
||||
|
@@ -209,8 +206,8 @@ private:
 
     void map_protected(LinearAddress, size_t length);
 
-    void create_identity_mapping(LinearAddress, size_t length);
-    void remove_identity_mapping(LinearAddress, size_t);
+    void create_identity_mapping(PageDirectory&, LinearAddress, size_t length);
+    void remove_identity_mapping(PageDirectory&, LinearAddress, size_t);
 
     static Region* region_from_laddr(Process&, LinearAddress);
 
@@ -305,21 +302,16 @@ private:
 
     OwnPtr<PageDirectory> m_kernel_page_directory;
     dword* m_page_table_zero;
     dword* m_page_table_one;
 
     LinearAddress m_next_laddr;
     LinearAddress m_quickmap_addr;
 
     Vector<RetainPtr<PhysicalPage>> m_free_physical_pages;
+    Vector<RetainPtr<PhysicalPage>> m_free_supervisor_physical_pages;
 
     HashTable<VMObject*> m_vmos;
     HashTable<Region*> m_regions;
 };
 
 struct KernelPagingScope {
     KernelPagingScope() { MM.enter_kernel_paging_scope(); }
     ~KernelPagingScope() { MM.enter_process_paging_scope(*current); }
 };
 
 struct ProcessPagingScope {
     ProcessPagingScope(Process& process) { MM.enter_process_paging_scope(process); }
     ~ProcessPagingScope() { MM.enter_process_paging_scope(*current); }
 };
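The KernelPagingScope and ProcessPagingScope structs at the end of the last hunk are plain RAII guards: the constructor switches into another paging scope and the destructor re-enters the current process's scope. The standalone sketch below only models that pattern; the globals and strings are invented for illustration, and the real guards simply wrap MM.enter_kernel_paging_scope() / MM.enter_process_paging_scope() as shown above.

// Standalone model of the RAII paging-scope pattern, not kernel code.
#include <cstdio>
#include <string>

static std::string g_active_space = "kernel";    // which page tables are "loaded"
static std::string g_current_process = "shell";  // stands in for *current

static void enter_paging_scope(const std::string& who)
{
    g_active_space = who;  // models switching the active page directory
    printf("now using %s's page tables\n", who.c_str());
}

struct ProcessPagingScope {
    explicit ProcessPagingScope(const std::string& process) { enter_paging_scope(process); }
    ~ProcessPagingScope() { enter_paging_scope(g_current_process); }  // back to *current on scope exit
};

int main()
{
    {
        ProcessPagingScope scope("editor");  // temporarily use another process's mappings
        // ... touch memory that is only mapped in "editor" here ...
    } // leaving the block restores the current process's paging scope
    printf("active space at exit: %s\n", g_active_space.c_str());
    return 0;
}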