Mirror of https://github.com/RGBCube/serenity
Kernel: Use consistent names for kmalloc globals and remove volatile
Commit ca4f714d68, parent 154a6e69a4.
3 changed files with 25 additions and 27 deletions
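Why dropping `volatile` is safe here, as I read the diff: these counters are only read and written with interrupts disabled (note the InterruptDisabler at the top of procfs$memstat below), and `volatile` never provided atomicity or ordering anyway, so it only inhibited optimization. A minimal userspace sketch of the same idea, with std::mutex standing in for the kernel's interrupt-disabling guard (illustrative names, not SerenityOS code):

#include <cstddef>
#include <cstdio>
#include <mutex>

static std::mutex s_heap_lock;       // stands in for InterruptDisabler
static size_t s_bytes_allocated = 0; // plain size_t; volatile would add nothing

void note_allocation(size_t size)
{
    // Safe read-modify-write: every access happens under the lock.
    // volatile would neither make += atomic nor order it across threads.
    std::lock_guard<std::mutex> guard(s_heap_lock);
    s_bytes_allocated += size;
}

int main()
{
    note_allocation(64);
    note_allocation(128);
    std::printf("allocated: %zu bytes\n", s_bytes_allocated);
    return 0;
}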
--- a/Kernel/FileSystem/ProcFS.cpp
+++ b/Kernel/FileSystem/ProcFS.cpp
@@ -823,15 +823,15 @@ Optional<KBuffer> procfs$memstat(InodeIdentifier)
     InterruptDisabler disabler;
     KBufferBuilder builder;
     JsonObjectSerializer<KBufferBuilder> json { builder };
-    json.add("kmalloc_allocated", (u32)sum_alloc);
-    json.add("kmalloc_available", (u32)sum_free);
-    json.add("kmalloc_eternal_allocated", (u32)kmalloc_sum_eternal);
+    json.add("kmalloc_allocated", (u32)g_kmalloc_bytes_allocated);
+    json.add("kmalloc_available", (u32)g_kmalloc_bytes_free);
+    json.add("kmalloc_eternal_allocated", (u32)g_kmalloc_bytes_eternal);
     json.add("user_physical_allocated", MM.user_physical_pages_used());
     json.add("user_physical_available", MM.user_physical_pages() - MM.user_physical_pages_used());
     json.add("super_physical_allocated", MM.super_physical_pages_used());
     json.add("super_physical_available", MM.super_physical_pages() - MM.super_physical_pages_used());
-    json.add("kmalloc_call_count", g_kmalloc_call_count);
-    json.add("kfree_call_count", g_kfree_call_count);
+    json.add("kmalloc_call_count", (u32)g_kmalloc_call_count);
+    json.add("kfree_call_count", (u32)g_kfree_call_count);
     slab_alloc_stats([&json](size_t slab_size, size_t num_allocated, size_t num_free) {
         auto prefix = String::format("slab_%zu", slab_size);
         json.add(String::format("%s_num_allocated", prefix.characters()), (u32)num_allocated);
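For context, the fields above end up in a flat JSON object that userspace reads from /proc/memstat. The output would look along these lines (field names from the code above; the numbers are invented for illustration):

{
    "kmalloc_allocated": 181234,
    "kmalloc_available": 2915342,
    "kmalloc_eternal_allocated": 279132,
    "user_physical_allocated": 6311,
    "kmalloc_call_count": 83211,
    "kfree_call_count": 78104,
    ...
}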
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -56,12 +56,11 @@ struct AllocationHeader {

 static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];

-volatile size_t sum_alloc = 0;
-volatile size_t sum_free = POOL_SIZE;
-volatile size_t kmalloc_sum_eternal = 0;
-
-u32 g_kmalloc_call_count;
-u32 g_kfree_call_count;
+size_t g_kmalloc_bytes_allocated = 0;
+size_t g_kmalloc_bytes_free = POOL_SIZE;
+size_t g_kmalloc_bytes_eternal = 0;
+size_t g_kmalloc_call_count;
+size_t g_kfree_call_count;
 bool g_dump_kmalloc_stacks;

 static u8* s_next_eternal_ptr;
@@ -72,9 +71,9 @@ void kmalloc_init()
     memset(&alloc_map, 0, sizeof(alloc_map));
     memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);

-    kmalloc_sum_eternal = 0;
-    sum_alloc = 0;
-    sum_free = POOL_SIZE;
+    g_kmalloc_bytes_eternal = 0;
+    g_kmalloc_bytes_allocated = 0;
+    g_kmalloc_bytes_free = POOL_SIZE;

     s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
     s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
@@ -85,7 +84,7 @@ void* kmalloc_eternal(size_t size)
     void* ptr = s_next_eternal_ptr;
     s_next_eternal_ptr += size;
     ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
-    kmalloc_sum_eternal += size;
+    g_kmalloc_bytes_eternal += size;
     return ptr;
 }

@@ -120,8 +119,8 @@ inline void* kmalloc_allocate(size_t first_chunk, size_t chunks_needed)
     Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
     bitmap_wrapper.set_range(first_chunk, chunks_needed, true);

-    sum_alloc += a->allocation_size_in_chunks * CHUNK_SIZE;
-    sum_free -= a->allocation_size_in_chunks * CHUNK_SIZE;
+    g_kmalloc_bytes_allocated += a->allocation_size_in_chunks * CHUNK_SIZE;
+    g_kmalloc_bytes_free -= a->allocation_size_in_chunks * CHUNK_SIZE;
 #ifdef SANITIZE_KMALLOC
     memset(ptr, KMALLOC_SCRUB_BYTE, (a->allocation_size_in_chunks * CHUNK_SIZE) - sizeof(AllocationHeader));
 #endif
@@ -141,9 +140,9 @@ void* kmalloc_impl(size_t size)
     // We need space for the AllocationHeader at the head of the block.
     size_t real_size = size + sizeof(AllocationHeader);

-    if (sum_free < real_size) {
+    if (g_kmalloc_bytes_free < real_size) {
         Kernel::dump_backtrace();
-        klog() << "kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=" << sum_free << ", real_size=" << real_size;
+        klog() << "kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=" << g_kmalloc_bytes_free << ", real_size=" << real_size;
         Kernel::hang();
     }

@@ -183,8 +182,8 @@ void kfree(void* ptr)
     Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
     bitmap_wrapper.set_range(start, a->allocation_size_in_chunks, false);

-    sum_alloc -= a->allocation_size_in_chunks * CHUNK_SIZE;
-    sum_free += a->allocation_size_in_chunks * CHUNK_SIZE;
+    g_kmalloc_bytes_allocated -= a->allocation_size_in_chunks * CHUNK_SIZE;
+    g_kmalloc_bytes_free += a->allocation_size_in_chunks * CHUNK_SIZE;

 #ifdef SANITIZE_KMALLOC
     memset(a, KFREE_SCRUB_BYTE, a->allocation_size_in_chunks * CHUNK_SIZE);
--- a/Kernel/Heap/kmalloc.h
+++ b/Kernel/Heap/kmalloc.h
@@ -42,12 +42,11 @@ void* krealloc(void*, size_t);
 void kfree(void*);
 void kfree_aligned(void*);

-extern volatile size_t sum_alloc;
-extern volatile size_t sum_free;
-extern volatile size_t kmalloc_sum_eternal;
-extern volatile size_t kmalloc_sum_page_aligned;
-extern u32 g_kmalloc_call_count;
-extern u32 g_kfree_call_count;
+extern size_t g_kmalloc_bytes_allocated;
+extern size_t g_kmalloc_bytes_free;
+extern size_t g_kmalloc_bytes_eternal;
+extern size_t g_kmalloc_call_count;
+extern size_t g_kfree_call_count;
 extern bool g_dump_kmalloc_stacks;

 inline void* operator new(size_t, void* p) { return p; }
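One convention worth noting, inferred from the diff rather than stated anywhere in it: the renamed globals all take a g_ prefix when they have external linkage (with an extern declaration in kmalloc.h), while file-local statics keep the s_ prefix:

// g_ prefix: mutable globals shared across translation units
extern size_t g_kmalloc_bytes_allocated; // declared in kmalloc.h, defined in kmalloc.cpp
extern size_t g_kfree_call_count;

// s_ prefix: internal (static) to kmalloc.cpp; u8 is AK's typedef for uint8_t
static u8* s_next_eternal_ptr;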