
Kernel: Protect the PageDirectory from concurrent access

commit 5b38132e3c
parent 2b25a89ab5
Authored by Tom on 2020-10-31 17:19:18 -06:00; committed by Andreas Kling
4 changed files with 14 additions and 3 deletions
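This change gives each PageDirectory its own RecursiveSpinLock and acquires it on every path that walks or mutates the directory's page tables, rather than relying on the global s_mm_lock alone. As a rough userland sketch of the pattern (nothing kernel-specific survives here: PageDirectoryModel is hypothetical, and std::recursive_mutex / std::scoped_lock stand in for RecursiveSpinLock / ScopedSpinLock):

#include <mutex>
#include <unordered_map>

class PageDirectoryModel {
public:
    std::recursive_mutex& get_lock() { return m_lock; }

    // Helpers like the kernel's ensure_pte() expect the caller to already
    // hold the lock; a recursive lock keeps nested acquisition safe.
    int& ensure_pte(unsigned long vaddr)
    {
        return m_page_tables[vaddr >> 12];
    }

private:
    std::unordered_map<unsigned long, int> m_page_tables;
    std::recursive_mutex m_lock; // models "RecursiveSpinLock m_lock"
};

int main()
{
    PageDirectoryModel pd;
    std::scoped_lock page_lock(pd.get_lock()); // models ScopedSpinLock page_lock(...)
    pd.ensure_pte(0x1000) = 1;
}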

Kernel/VM/MemoryManager.cpp

@@ -87,6 +87,7 @@ MemoryManager::~MemoryManager()
 void MemoryManager::protect_kernel_image()
 {
+    ScopedSpinLock page_lock(kernel_page_directory().get_lock());
     // Disable writing to the kernel text and rodata segments.
     for (size_t i = (FlatPtr)&start_of_kernel_text; i < (FlatPtr)&start_of_kernel_data; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -193,10 +194,11 @@ void MemoryManager::parse_memory_map()
     ASSERT(m_user_physical_pages > 0);
 }
 
-PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
+PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
+    ASSERT(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -213,6 +215,7 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
+    ASSERT(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -259,6 +262,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
+    ASSERT(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
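All three table-walking helpers (pte, ensure_pte, release_pte) now assert ownership of the directory's lock on top of s_mm_lock, so a caller that forgets either lock trips an assertion instead of racing. As a sketch of what an own_lock() check involves, assuming a recursive spinlock that records its owning thread (a userland model, not the kernel's implementation):

#include <atomic>
#include <cassert>
#include <thread>

class RecursiveSpinLockModel {
public:
    void lock()
    {
        auto me = std::this_thread::get_id();
        if (m_owner.load(std::memory_order_relaxed) == me) {
            ++m_depth; // owner may re-enter
            return;
        }
        while (m_locked.test_and_set(std::memory_order_acquire)) {
            // spin until released
        }
        m_owner.store(me, std::memory_order_relaxed);
        m_depth = 1;
    }

    void unlock()
    {
        assert(own_lock());
        if (--m_depth == 0) {
            m_owner.store(std::thread::id(), std::memory_order_relaxed);
            m_locked.clear(std::memory_order_release);
        }
    }

    // The check the new ASSERTs rely on: is the calling thread the owner?
    bool own_lock() const
    {
        return m_owner.load(std::memory_order_relaxed) == std::this_thread::get_id();
    }

private:
    std::atomic_flag m_locked = ATOMIC_FLAG_INIT;
    std::atomic<std::thread::id> m_owner { std::thread::id() };
    unsigned m_depth { 0 };
};

int main()
{
    RecursiveSpinLockModel lock;
    lock.lock();
    assert(lock.own_lock());
    lock.lock(); // nested acquisition by the same thread is fine
    lock.unlock();
    lock.unlock();
}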

Kernel/VM/MemoryManager.h

@@ -191,7 +191,7 @@ private:
     PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
     PageTableEntry* quickmap_pt(PhysicalAddress);
-    PageTableEntry* pte(const PageDirectory&, VirtualAddress);
+    PageTableEntry* pte(PageDirectory&, VirtualAddress);
     PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
     void release_pte(PageDirectory&, VirtualAddress, bool);

Kernel/VM/PageDirectory.h

@@ -57,6 +57,8 @@ public:
     Process* process() { return m_process; }
     const Process* process() const { return m_process; }
 
+    RecursiveSpinLock& get_lock() { return m_lock; }
+
 private:
     PageDirectory(Process&, const RangeAllocator* parent_range_allocator);
     PageDirectory();
@@ -67,6 +69,7 @@ private:
     RefPtr<PhysicalPage> m_directory_table;
     RefPtr<PhysicalPage> m_directory_pages[4];
     HashMap<u32, RefPtr<PhysicalPage>> m_page_tables;
+    RecursiveSpinLock m_lock;
 };
 
 }
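The recursion matters: Region::map() takes the lock through get_lock(), then calls into MemoryManager helpers that may take it again on the same thread, which would deadlock a plain spinlock. Callers never lock m_lock directly; they go through SerenityOS's ScopedSpinLock RAII guard. A minimal guard model (ScopedGuardModel is hypothetical, shown over std::recursive_mutex):

#include <mutex>

template<typename LockType>
class ScopedGuardModel {
public:
    explicit ScopedGuardModel(LockType& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~ScopedGuardModel() { m_lock.unlock(); } // released on every exit path

    ScopedGuardModel(const ScopedGuardModel&) = delete;
    ScopedGuardModel& operator=(const ScopedGuardModel&) = delete;

private:
    LockType& m_lock;
};

int main()
{
    std::recursive_mutex lock;
    ScopedGuardModel guard(lock); // like ScopedSpinLock page_lock(pd.get_lock())
}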

Kernel/VM/Region.cpp

@@ -229,6 +229,7 @@ Bitmap& Region::ensure_cow_map() const
 bool Region::map_individual_page_impl(size_t page_index)
 {
+    ASSERT(m_page_directory->get_lock().own_lock());
     auto page_vaddr = vaddr_from_page_index(page_index);
     auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     if (!pte) {
@@ -260,8 +261,9 @@ bool Region::map_individual_page_impl(size_t page_index)
 bool Region::remap_page(size_t page_index, bool with_flush)
 {
-    ASSERT(m_page_directory);
     ScopedSpinLock lock(s_mm_lock);
+    ASSERT(m_page_directory);
+    ScopedSpinLock page_lock(m_page_directory->get_lock());
     ASSERT(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
     if (with_flush)
@@ -273,6 +275,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
 {
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(m_page_directory);
+    ScopedSpinLock page_lock(m_page_directory->get_lock());
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
@@ -302,6 +305,7 @@ void Region::set_page_directory(PageDirectory& page_directory)
 bool Region::map(PageDirectory& page_directory)
 {
     ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock page_lock(page_directory.get_lock());
     set_page_directory(page_directory);
 #ifdef MM_DEBUG
     dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
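Every Region path above acquires the locks in the same order: the global s_mm_lock first, then the page directory's lock. Holding to one order on all paths is what keeps two threads from deadlocking while each waits for the lock the other holds. A compact sketch of that ordering, with hypothetical stand-in names:

#include <mutex>

std::recursive_mutex g_mm_lock_model;       // stands in for s_mm_lock
std::recursive_mutex g_page_directory_lock; // stands in for PageDirectory::m_lock

void map_region_model()
{
    std::scoped_lock lock(g_mm_lock_model);             // ScopedSpinLock lock(s_mm_lock);
    std::scoped_lock page_lock(g_page_directory_lock);  // ScopedSpinLock page_lock(...get_lock());
    // ... map pages; helpers may re-take the directory lock recursively.
}

int main()
{
    map_region_model();
}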