mirror of
https://github.com/RGBCube/serenity
synced 2025-07-19 18:57:40 +00:00
LibC: Allow opting into malloc() and free() performance event logging
If a program is started with LIBC_PROFILE_MALLOC set in its environment, it will now emit PERF_EVENT_MALLOC and PERF_EVENT_FREE perf events on every malloc() and free() call.
This commit is contained in:
parent
3879e5b9d4
commit
fa97ff1c83
4 changed files with 32 additions and 4 deletions
|
@ -60,6 +60,7 @@ LIBC_OBJS = \
|
||||||
ELF_OBJS = \
|
ELF_OBJS = \
|
||||||
../LibELF/ELFDynamicObject.o \
|
../LibELF/ELFDynamicObject.o \
|
||||||
../LibELF/ELFDynamicLoader.o \
|
../LibELF/ELFDynamicLoader.o \
|
||||||
|
../LibELF/ELFLoader.o \
|
||||||
../LibELF/ELFImage.o
|
../LibELF/ELFImage.o
|
||||||
|
|
||||||
OBJS = $(AK_OBJS) $(LIBC_OBJS) $(ELF_OBJS)
|
OBJS = $(AK_OBJS) $(LIBC_OBJS) $(ELF_OBJS)
|
||||||
|
|
|
@ -57,6 +57,7 @@ constexpr int number_of_big_blocks_to_keep_around_per_size_class = 8;
|
||||||
static bool s_log_malloc = false;
|
static bool s_log_malloc = false;
|
||||||
static bool s_scrub_malloc = true;
|
static bool s_scrub_malloc = true;
|
||||||
static bool s_scrub_free = true;
|
static bool s_scrub_free = true;
|
||||||
|
static bool s_profiling = false;
|
||||||
static unsigned short size_classes[] = { 8, 16, 32, 64, 128, 252, 508, 1016, 2036, 0 };
|
static unsigned short size_classes[] = { 8, 16, 32, 64, 128, 252, 508, 1016, 2036, 0 };
|
||||||
static constexpr size_t num_size_classes = sizeof(size_classes) / sizeof(unsigned short);
|
static constexpr size_t num_size_classes = sizeof(size_classes) / sizeof(unsigned short);
|
||||||
|
|
||||||
|
@ -170,7 +171,7 @@ static void os_free(void* ptr, size_t size)
|
||||||
assert(rc == 0);
|
assert(rc == 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void* malloc(size_t size)
|
static void* malloc_impl(size_t size)
|
||||||
{
|
{
|
||||||
LOCKER(malloc_lock());
|
LOCKER(malloc_lock());
|
||||||
|
|
||||||
|
@ -266,7 +267,7 @@ void* malloc(size_t size)
|
||||||
return ptr;
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
void free(void* ptr)
|
static void free_impl(void* ptr)
|
||||||
{
|
{
|
||||||
ScopedValueRollback rollback(errno);
|
ScopedValueRollback rollback(errno);
|
||||||
|
|
||||||
|
@ -352,6 +353,21 @@ void free(void* ptr)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void* malloc(size_t size)
|
||||||
|
{
|
||||||
|
void* ptr = malloc_impl(size);
|
||||||
|
if (s_profiling)
|
||||||
|
perf_event(PERF_EVENT_MALLOC, size, reinterpret_cast<uintptr_t>(ptr));
|
||||||
|
return ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
void free(void* ptr)
|
||||||
|
{
|
||||||
|
if (s_profiling)
|
||||||
|
perf_event(PERF_EVENT_FREE, reinterpret_cast<uintptr_t>(ptr), 0);
|
||||||
|
free_impl(ptr);
|
||||||
|
}
|
||||||
|
|
||||||
void* calloc(size_t count, size_t size)
|
void* calloc(size_t count, size_t size)
|
||||||
{
|
{
|
||||||
size_t new_size = count * size;
|
size_t new_size = count * size;
|
||||||
|
@ -396,6 +412,8 @@ void __malloc_init()
|
||||||
s_scrub_free = false;
|
s_scrub_free = false;
|
||||||
if (getenv("LIBC_LOG_MALLOC"))
|
if (getenv("LIBC_LOG_MALLOC"))
|
||||||
s_log_malloc = true;
|
s_log_malloc = true;
|
||||||
|
if (getenv("LIBC_PROFILE_MALLOC"))
|
||||||
|
s_profiling = true;
|
||||||
|
|
||||||
g_allocators = (Allocator*)mmap_with_name(nullptr, sizeof(Allocator) * num_size_classes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0, "LibC Allocators");
|
g_allocators = (Allocator*)mmap_with_name(nullptr, sizeof(Allocator) * num_size_classes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0, "LibC Allocators");
|
||||||
for (size_t i = 0; i < num_size_classes; ++i) {
|
for (size_t i = 0; i < num_size_classes; ++i) {
|
||||||
|
@ -404,10 +422,9 @@ void __malloc_init()
|
||||||
}
|
}
|
||||||
|
|
||||||
g_big_allocators = (BigAllocator*)mmap_with_name(nullptr, sizeof(BigAllocator), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0, "LibC BigAllocators");
|
g_big_allocators = (BigAllocator*)mmap_with_name(nullptr, sizeof(BigAllocator), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0, "LibC BigAllocators");
|
||||||
new (g_big_allocators) (BigAllocator);
|
new (g_big_allocators)(BigAllocator);
|
||||||
|
|
||||||
// We could mprotect the mmaps here with atexit, but, since this method is called in _start before
|
// We could mprotect the mmaps here with atexit, but, since this method is called in _start before
|
||||||
// _init and __init_array entries, our mprotect method would always be the last thing run before _exit.
|
// _init and __init_array entries, our mprotect method would always be the last thing run before _exit.
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -79,4 +79,9 @@ int purge(int mode)
|
||||||
__RETURN_WITH_ERRNO(rc, rc, -1);
|
__RETURN_WITH_ERRNO(rc, rc, -1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int perf_event(int type, uintptr_t arg1, uintptr_t arg2)
|
||||||
|
{
|
||||||
|
int rc = syscall(SC_perf_event, type, arg1, arg2);
|
||||||
|
__RETURN_WITH_ERRNO(rc, rc, -1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -56,4 +56,9 @@ int futex(int32_t* userspace_address, int futex_op, int32_t value, const struct
|
||||||
|
|
||||||
int purge(int mode);
|
int purge(int mode);
|
||||||
|
|
||||||
|
#define PERF_EVENT_MALLOC 1
|
||||||
|
#define PERF_EVENT_FREE 2
|
||||||
|
|
||||||
|
int perf_event(int type, uintptr_t arg1, uintptr_t arg2);
|
||||||
|
|
||||||
__END_DECLS
|
__END_DECLS
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue