Kernel: Allocate kernel stacks for threads using the region allocator.
This patch moves away from using kmalloc memory for thread kernel stacks. This reduces pressure on kmalloc (16 KB per thread adds up fast) and prevents kernel stack overflow from scribbling all over random unrelated kernel memory.
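For context, here is a minimal before/after sketch of what this means for a thread's kernel stack. The member names and call sites are illustrative assumptions; only allocate_kernel_region() and the 16 KB figure come from this patch:

    // Before (assumed shape): the kernel stack was a plain kmalloc() chunk,
    // living in the middle of the kernel heap. Overflowing it corrupted
    // whatever kmalloc had handed out next door.
    m_kernel_stack = kmalloc(default_kernel_stack_size); // 16 KB per thread

    // After (assumed shape): the stack gets a dedicated, named Region mapped
    // through the region allocator, away from the kmalloc heap.
    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, "Kernel Stack");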
parent 8c3ad802d8
commit c8a216b107

6 changed files with 65 additions and 24 deletions
@@ -80,7 +80,9 @@ void MemoryManager::initialize_paging()
     // 1 MB -> 2 MB kmalloc_eternal() space.
     // 2 MB -> 3 MB kmalloc() space.
     // 3 MB -> 4 MB Supervisor physical pages (available for allocation!)
-    // 4 MB -> (max) MB Userspace physical pages (available for allocation!)
+    // 4 MB -> 0xc0000000 Userspace physical pages (available for allocation!)
+    // 0xc0000000-0xffffffff Kernel-only linear address space
+
     for (size_t i = (2 * MB); i < (4 * MB); i += PAGE_SIZE)
         m_free_supervisor_physical_pages.append(PhysicalPage::create_eternal(PhysicalAddress(i), true));

@@ -209,6 +211,13 @@ Region* MemoryManager::region_from_laddr(Process& process, LinearAddress laddr)
 {
     ASSERT_INTERRUPTS_DISABLED();

+    if (laddr.get() >= 0xc0000000) {
+        for (auto& region : MM.m_kernel_regions) {
+            if (region->contains(laddr))
+                return region;
+        }
+    }
+
     // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
     for (auto& region : process.m_regions) {
         if (region->contains(laddr))

@@ -220,6 +229,13 @@ Region* MemoryManager::region_from_laddr(Process& process, LinearAddress laddr)

 const Region* MemoryManager::region_from_laddr(const Process& process, LinearAddress laddr)
 {
+    if (laddr.get() >= 0xc0000000) {
+        for (auto& region : MM.m_kernel_regions) {
+            if (region->contains(laddr))
+                return region;
+        }
+    }
+
     // FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
     for (auto& region : process.m_regions) {
         if (region->contains(laddr))

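Both overloads of region_from_laddr() now short-circuit to the kernel-region table whenever the address lies at or above 0xc0000000, the kernel-only boundary declared in the memory map comment above; the same test routes regions at registration time in register_region() further down. As a sketch, with an illustrative helper name that is not in the patch:

    // Addresses in [0xc0000000, 0xffffffff] live in the kernel-only half of
    // the 32-bit linear address space, so they are resolved against
    // m_kernel_regions rather than the per-process region list.
    static inline bool is_kernel_linear_address(LinearAddress laddr)
    {
        return laddr.get() >= 0xc0000000;
    }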
@@ -379,6 +395,21 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return PageFaultResponse::ShouldCrash;
 }

+RetainPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& name)
+{
+    InterruptDisabler disabler;
+
+    // FIXME: We need a linear address space allocator.
+    static dword next_laddr = 0xd0000000;
+    ASSERT(!(size % PAGE_SIZE));
+    LinearAddress laddr(next_laddr);
+    next_laddr += size + 16384;
+
+    auto region = adopt(*new Region(laddr, size, move(name), true, true, false));
+    MM.map_region_at_address(*m_kernel_page_directory, *region, laddr, false);
+    return region;
+}
+
 RetainPtr<PhysicalPage> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill)
 {
     InterruptDisabler disabler;

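Two details of this bump allocator are worth calling out: linear addresses are handed out monotonically from 0xd0000000, and next_laddr advances by size + 16384, leaving an unmapped 16 KB guard gap after every region. A kernel stack that overruns its region therefore page-faults in the gap instead of scribbling on a neighboring allocation. A hypothetical call site (only allocate_kernel_region() itself is from the patch):

    // Carve out a page-aligned 16 KB kernel stack; a non-page-aligned size
    // would trip the ASSERT in allocate_kernel_region().
    auto stack_region = MM.allocate_kernel_region(4 * PAGE_SIZE, "Kernel Stack (example)");
    ASSERT(stack_region);

    // x86 stacks grow downward, so the usable stack pointer starts at the
    // top of the region.
    dword stack_top = stack_region->laddr().offset(stack_region->size()).get();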
@@ -610,13 +641,19 @@ void MemoryManager::unregister_vmo(VMObject& vmo)
 void MemoryManager::register_region(Region& region)
 {
     InterruptDisabler disabler;
-    m_regions.set(&region);
+    if (region.laddr().get() >= 0xc0000000)
+        m_kernel_regions.set(&region);
+    else
+        m_user_regions.set(&region);
 }

 void MemoryManager::unregister_region(Region& region)
 {
     InterruptDisabler disabler;
-    m_regions.remove(&region);
+    if (region.laddr().get() >= 0xc0000000)
+        m_kernel_regions.remove(&region);
+    else
+        m_user_regions.remove(&region);
 }

 ProcessPagingScope::ProcessPagingScope(Process& process)

@@ -68,6 +68,9 @@ public:

     void map_for_kernel(LinearAddress, PhysicalAddress);

+    RetainPtr<Region> allocate_kernel_region(size_t, String&& name);
+    void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
+
 private:
     MemoryManager();
     ~MemoryManager();

@@ -77,7 +80,6 @@ private:
     void register_region(Region&);
     void unregister_region(Region&);

-    void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
     void remap_region_page(Region&, unsigned page_index_in_region, bool user_allowed);

     void initialize_paging();

@@ -211,7 +213,8 @@ private:
     Vector<Retained<PhysicalPage>> m_free_supervisor_physical_pages;

     HashTable<VMObject*> m_vmos;
-    HashTable<Region*> m_regions;
+    HashTable<Region*> m_user_regions;
+    HashTable<Region*> m_kernel_regions;

     size_t m_ram_size { 0 };
     bool m_quickmap_in_use { false };

@@ -80,7 +80,11 @@ void VMObject::for_each_region(Callback callback)
 {
     // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
     // Perhaps VMObject could have a Vector<Region*> with all of his mappers?
-    for (auto* region : MM.m_regions) {
+    for (auto* region : MM.m_user_regions) {
         if (&region->vmo() == this)
             callback(*region);
     }
+    for (auto* region : MM.m_kernel_regions) {
+        if (&region->vmo() == this)
+            callback(*region);
+    }