Kernel: Make identity mapping mechanism used during AP boot non-generic
When booting APs (application processors), we identity map a region at 0x8000 while doing the initial bringup sequence. This is the only thing in the kernel that requires an identity mapping, yet we had a bunch of generic APIs and a dedicated VirtualRangeAllocator in every PageDirectory for this purpose. This patch simplifies the situation by moving the identity mapping logic into the AP boot code and removing the generic APIs.
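(For context: an identity mapping is one where a region's virtual address equals its physical address. The APs begin executing in 16-bit real mode at physical address 0x8000, the address encoded in the startup IPI vector, so the bringup code must be reachable at a virtual address numerically identical to its physical one while paging is switched on. A minimal usage sketch of the helper this patch introduces, assuming an illustrative single-page region; see the APIC.cpp hunk below for the real call sites:)

    // Sketch: identity-map the AP bootstrap area at physical 0x8000.
    // The region's virtual base equals its physical base.
    auto region = create_identity_mapped_region(PhysicalAddress(0x8000), PAGE_SIZE);
    VERIFY(region->vaddr().get() == 0x8000); // virtual == physical

    // ... copy the AP bootstrap code into the region and start the APs ...

    // Unmap manually; this virtual range was never handed out by the kernel's
    // VirtualRangeAllocator, so it must not be "deallocated" back into it.
    region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);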
parent 16ac3bbfd7
commit cdab5b2091

6 changed files with 20 additions and 23 deletions
Kernel/Interrupts/APIC.cpp

@@ -15,6 +15,7 @@
 #include <Kernel/IO.h>
 #include <Kernel/Interrupts/APIC.h>
 #include <Kernel/Interrupts/SpuriousInterruptHandler.h>
+#include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/PageDirectory.h>
 #include <Kernel/Memory/TypedMapping.h>
@@ -274,6 +275,19 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
     return true;
 }
 
+UNMAP_AFTER_INIT static NonnullOwnPtr<Memory::Region> create_identity_mapped_region(PhysicalAddress paddr, size_t size)
+{
+    auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size);
+    VERIFY(vmobject);
+    auto region = MM.allocate_kernel_region_with_vmobject(
+        Memory::VirtualRange { VirtualAddress { static_cast<FlatPtr>(paddr.get()) }, size },
+        vmobject.release_nonnull(),
+        {},
+        Memory::Region::Access::Read | Memory::Region::Access::Write | Memory::Region::Access::Execute);
+    VERIFY(region);
+    return region.release_nonnull();
+}
+
 UNMAP_AFTER_INIT void APIC::do_boot_aps()
 {
     VERIFY(m_processor_enabled_cnt > 1);
@@ -283,7 +297,7 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
     // Also account for the data appended to:
     // * aps_to_enable u32 values for ap_cpu_init_stacks
     // * aps_to_enable u32 values for ap_cpu_init_processor_info_array
-    auto apic_startup_region = MM.allocate_kernel_region_identity(PhysicalAddress(0x8000), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))), {}, Memory::Region::Access::Read | Memory::Region::Access::Write | Memory::Region::Access::Execute);
+    auto apic_startup_region = create_identity_mapped_region(PhysicalAddress(0x8000), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))));
     memcpy(apic_startup_region->vaddr().as_ptr(), reinterpret_cast<const void*>(apic_ap_start), apic_ap_start_size);
 
     // Allocate enough stacks for all APs
@@ -362,6 +376,10 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
     }
 
     dbgln_if(APIC_DEBUG, "APIC: {} processors are initialized and running", m_processor_enabled_cnt);
+
+    // NOTE: Since this region is identity-mapped, we have to unmap it manually to prevent the virtual
+    //       address range from leaking into the general virtual range allocator.
+    apic_startup_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 }
 
 UNMAP_AFTER_INIT void APIC::boot_aps()
Kernel/Memory/MemoryManager.cpp

@@ -733,19 +733,6 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
 }
 
-OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
-{
-    auto vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
-    if (!vm_object)
-        return {};
-    VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(s_mm_lock);
-    auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
-    if (!range.has_value())
-        return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
-}
-
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     ScopedSpinLock lock(s_mm_lock);
Kernel/Memory/MemoryManager.h

@@ -183,7 +183,6 @@ public:
     OwnPtr<Region> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 
Kernel/Memory/PageDirectory.cpp

@@ -38,7 +38,6 @@ UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_
     // make sure this starts in a new page directory to make MemoryManager::initialize_physical_pages() happy
     FlatPtr start_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
     directory->m_range_allocator.initialize_with_range(VirtualAddress(start_of_range), KERNEL_PD_END - start_of_range);
-    directory->m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
 
     return directory;
 }
Kernel/Memory/PageDirectory.h

@@ -39,8 +39,6 @@ public:
     VirtualRangeAllocator& range_allocator() { return m_range_allocator; }
     VirtualRangeAllocator const& range_allocator() const { return m_range_allocator; }
 
-    VirtualRangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
-
     AddressSpace* address_space() { return m_space; }
     const AddressSpace* address_space() const { return m_space; }
 
@@ -53,7 +51,6 @@ private:
 
     AddressSpace* m_space { nullptr };
     VirtualRangeAllocator m_range_allocator;
-    VirtualRangeAllocator m_identity_range_allocator;
 #if ARCH(X86_64)
     RefPtr<PhysicalPage> m_pml4t;
 #endif
Kernel/Memory/Region.cpp

@@ -247,10 +247,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryVirtualRange deallocate_range)
     }
     MM.flush_tlb(m_page_directory, vaddr(), page_count());
     if (deallocate_range == ShouldDeallocateVirtualMemoryVirtualRange::Yes) {
-        if (m_page_directory->range_allocator().contains(range()))
-            m_page_directory->range_allocator().deallocate(range());
-        else
-            m_page_directory->identity_range_allocator().deallocate(range());
+        m_page_directory->range_allocator().deallocate(range());
     }
     m_page_directory = nullptr;
 }
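Design note: Region::unmap() normally returns a region's virtual range to its page directory's VirtualRangeAllocator when called with ShouldDeallocateVirtualMemoryVirtualRange::Yes. The identity-mapped startup region's range was never allocated from that allocator, which is why do_boot_aps() unmaps it with ShouldDeallocateVirtualMemoryVirtualRange::No: handing the range back would make the general-purpose allocator treat a low, identity-mapped virtual range as free for future kernel allocations, as the NOTE in the APIC.cpp hunk above explains.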