1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-31 12:38:12 +00:00

Give each task its own page directory.

This isn't finished but I'll commit as I go. We need to get to where context
switching only needs to change CR3 and everything's ready to go.

My basic idea is:
- The first 4 kB is off-limits. This catches null dereferences.
- Up to the 4 MB mark is identity-mapped and kernel-only.
- The rest is available to everyone!

While the first 4 MB is only available to the kernel, it's still mapped in
every process, for convenience when entering the kernel.
This commit is contained in:
Andreas Kling 2018-11-01 09:01:51 +01:00
parent cddd2f37e9
commit 1da0a7c949
7 changed files with 47 additions and 15 deletions

View file

@ -22,6 +22,7 @@ typedef struct
#define CHUNK_SIZE 128
#define POOL_SIZE (1024 * 1024)
#define PAGE_ALIGNED_BASE_PHYSICAL 0x380000
#define ETERNAL_BASE_PHYSICAL 0x300000
#define BASE_PHYS 0x200000
@ -30,8 +31,10 @@ PRIVATE BYTE alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
volatile DWORD sum_alloc = 0;
volatile DWORD sum_free = POOL_SIZE;
volatile size_t kmalloc_sum_eternal = 0;
volatile size_t kmalloc_sum_page_aligned = 0;
static byte* s_next_eternal_ptr;
static byte* s_next_page_aligned_ptr;
bool is_kmalloc_address(void* ptr)
{
@ -47,10 +50,12 @@ kmalloc_init()
memset( (void *)BASE_PHYS, 0, POOL_SIZE );
kmalloc_sum_eternal = 0;
kmalloc_sum_page_aligned = 0;
sum_alloc = 0;
sum_free = POOL_SIZE;
s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
s_next_page_aligned_ptr = (byte*)PAGE_ALIGNED_BASE_PHYSICAL;
}
void* kmalloc_eternal(size_t size)
@ -61,6 +66,16 @@ void* kmalloc_eternal(size_t size)
return ptr;
}
void* kmalloc_page_aligned(size_t size)
{
    // Bump-pointer allocator for page-granular requests.
    // Callers may only ask for a whole number of 4 kB pages,
    // and this memory is never returned to the pool.
    ASSERT((size % 4096) == 0);
    void* result = s_next_page_aligned_ptr;
    kmalloc_sum_page_aligned += size;
    s_next_page_aligned_ptr += size;
    return result;
}
PUBLIC void *
kmalloc( DWORD size )
{