1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 03:17:35 +00:00

More work on per-process page directories. It basically works now!

I spent some time stuck on a problem where processes would clobber each
other's stacks. Took me a moment to figure out that their stacks
were allocated in the sub-4MB linear address range which is shared
between all processes. Oops!
This commit is contained in:
Andreas Kling 2018-11-01 11:30:48 +01:00
parent 1da0a7c949
commit c45f166c63
5 changed files with 147 additions and 61 deletions

View file

@@ -41,7 +41,7 @@ class MemoryManager {
public:
static MemoryManager& the() PURE;
-    PhysicalAddress pageDirectoryBase() const { return PhysicalAddress(reinterpret_cast<dword>(m_pageDirectory)); }
+    PhysicalAddress pageDirectoryBase() const { return PhysicalAddress(reinterpret_cast<dword>(m_kernel_page_directory)); }
static void initialize();
@@ -65,12 +65,22 @@ public:
void registerZone(Zone&);
void unregisterZone(Zone&);
-    void populatePageDirectory(Task&);
+    void populate_page_directory(Task&);
byte* create_kernel_alias_for_region(Task::Region&);
void remove_kernel_alias_for_region(Task::Region&, byte*);
void enter_kernel_paging_scope();
void enter_task_paging_scope(Task&);
private:
MemoryManager();
~MemoryManager();
LinearAddress allocate_linear_address_range(size_t);
void map_region_at_address(dword* page_directory, Task::Region&, LinearAddress);
void unmap_range(dword* page_directory, LinearAddress, size_t);
void initializePaging();
void flushEntireTLB();
void flushTLB(LinearAddress);
@@ -162,10 +172,12 @@ private:
PageTableEntry ensurePTE(dword* pageDirectory, LinearAddress);
-    dword* m_pageDirectory;
+    dword* m_kernel_page_directory;
dword* m_pageTableZero;
dword* m_pageTableOne;
LinearAddress m_next_laddr;
HashTable<Zone*> m_zones;
Vector<PhysicalAddress> m_freePages;