mirror of
https://github.com/RGBCube/serenity
synced 2025-05-31 03:58:12 +00:00
Kernel: Fix UB caused by taking a reference to a packed struct's member
Taking a reference or a pointer to a value that's not aligned properly is undefined behavior. While `[[gnu::packed]]` ensures that reads from and writes to fields of packed structs are safe operations, the information about the reduced alignment is lost when creating pointers to these values. Weirdly enough, GCC's undefined behavior sanitizer doesn't flag these, even though the documentation of `-Waddress-of-packed-member` says that it usually leads to UB. In contrast, x86_64 Clang does flag these, which renders the 64-bit kernel unable to boot. For now, the `address-of-packed-member` warning will only be enabled in the kernel, as it is absolutely crucial there because of KUBSAN, but might get excessively noisy for the userland in the future. Also note that we can't append to `CMAKE_CXX_FLAGS` like we do for other flags in the kernel, because flags added via `add_compile_options` come after these, so the `-Wno-address-of-packed-member` in the root would cancel it out.
This commit is contained in:
parent
90caebe96a
commit
fa8507d1ce
3 changed files with 45 additions and 25 deletions
|
@ -351,6 +351,10 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector-strong")
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nodefaultlibs -nostdlib")
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nodefaultlibs -nostdlib")
|
||||||
|
|
||||||
|
if (USE_CLANG_TOOLCHAIN)
|
||||||
|
add_compile_options(-Waddress-of-packed-member)
|
||||||
|
endif()
|
||||||
|
|
||||||
# Apply any flags that are only available on >= GCC 11.1
|
# Apply any flags that are only available on >= GCC 11.1
|
||||||
if (NOT USE_CLANG_TOOLCHAIN AND CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11.1)
|
if (NOT USE_CLANG_TOOLCHAIN AND CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11.1)
|
||||||
# Zero any registers used within a function on return (to reduce data lifetime and ROP gadgets).
|
# Zero any registers used within a function on return (to reduce data lifetime and ROP gadgets).
|
||||||
|
|
|
@ -218,10 +218,18 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
|
||||||
Vector<ContiguousPhysicalVirtualRange> contiguous_physical_ranges;
|
Vector<ContiguousPhysicalVirtualRange> contiguous_physical_ranges;
|
||||||
|
|
||||||
for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
|
for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
|
||||||
dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", mmap->addr, mmap->len, mmap->type);
|
// We have to copy these onto the stack, because we take a reference to these when printing them out,
|
||||||
|
// and doing so on a packed struct field is UB.
|
||||||
auto start_address = PhysicalAddress(mmap->addr);
|
auto address = mmap->addr;
|
||||||
auto length = mmap->len;
|
auto length = mmap->len;
|
||||||
|
ArmedScopeGuard write_back_guard = [&]() {
|
||||||
|
mmap->addr = address;
|
||||||
|
mmap->len = length;
|
||||||
|
};
|
||||||
|
|
||||||
|
dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", address, length, mmap->type);
|
||||||
|
|
||||||
|
auto start_address = PhysicalAddress(address);
|
||||||
switch (mmap->type) {
|
switch (mmap->type) {
|
||||||
case (MULTIBOOT_MEMORY_AVAILABLE):
|
case (MULTIBOOT_MEMORY_AVAILABLE):
|
||||||
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
|
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
|
||||||
|
@ -249,23 +257,23 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
// Fix up unaligned memory regions.
|
// Fix up unaligned memory regions.
|
||||||
auto diff = (FlatPtr)mmap->addr % PAGE_SIZE;
|
auto diff = (FlatPtr)address % PAGE_SIZE;
|
||||||
if (diff != 0) {
|
if (diff != 0) {
|
||||||
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting {:p} by {} bytes", mmap->addr, diff);
|
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting {:p} by {} bytes", address, diff);
|
||||||
diff = PAGE_SIZE - diff;
|
diff = PAGE_SIZE - diff;
|
||||||
mmap->addr += diff;
|
address += diff;
|
||||||
mmap->len -= diff;
|
length -= diff;
|
||||||
}
|
}
|
||||||
if ((mmap->len % PAGE_SIZE) != 0) {
|
if ((length % PAGE_SIZE) != 0) {
|
||||||
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting length {} by {} bytes", mmap->len, mmap->len % PAGE_SIZE);
|
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting length {} by {} bytes", length, length % PAGE_SIZE);
|
||||||
mmap->len -= mmap->len % PAGE_SIZE;
|
length -= length % PAGE_SIZE;
|
||||||
}
|
}
|
||||||
if (mmap->len < PAGE_SIZE) {
|
if (length < PAGE_SIZE) {
|
||||||
dmesgln("MM: Memory physical_region from bootloader is too small; we want >= {} bytes, but got {} bytes", PAGE_SIZE, mmap->len);
|
dmesgln("MM: Memory physical_region from bootloader is too small; we want >= {} bytes, but got {} bytes", PAGE_SIZE, length);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (PhysicalSize page_base = mmap->addr; page_base <= (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
|
for (PhysicalSize page_base = address; page_base <= (address + length); page_base += PAGE_SIZE) {
|
||||||
auto addr = PhysicalAddress(page_base);
|
auto addr = PhysicalAddress(page_base);
|
||||||
|
|
||||||
// Skip used memory ranges.
|
// Skip used memory ranges.
|
||||||
|
|
|
@ -824,10 +824,10 @@ bool Thread::has_signal_handler(u8 signal) const
|
||||||
return !action.handler_or_sigaction.is_null();
|
return !action.handler_or_sigaction.is_null();
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool push_value_on_user_stack(FlatPtr* stack, FlatPtr data)
|
static bool push_value_on_user_stack(FlatPtr& stack, FlatPtr data)
|
||||||
{
|
{
|
||||||
*stack -= sizeof(FlatPtr);
|
stack -= sizeof(FlatPtr);
|
||||||
return copy_to_user((FlatPtr*)*stack, &data);
|
return copy_to_user((FlatPtr*)stack, &data);
|
||||||
}
|
}
|
||||||
|
|
||||||
void Thread::resume_from_stopped()
|
void Thread::resume_from_stopped()
|
||||||
|
@ -946,15 +946,15 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
|
||||||
|
|
||||||
auto setup_stack = [&](RegisterState& state) {
|
auto setup_stack = [&](RegisterState& state) {
|
||||||
#if ARCH(I386)
|
#if ARCH(I386)
|
||||||
FlatPtr* stack = &state.userspace_esp;
|
FlatPtr stack = state.userspace_esp;
|
||||||
FlatPtr old_esp = *stack;
|
FlatPtr old_esp = stack;
|
||||||
FlatPtr ret_eip = state.eip;
|
FlatPtr ret_eip = state.eip;
|
||||||
FlatPtr ret_eflags = state.eflags;
|
FlatPtr ret_eflags = state.eflags;
|
||||||
|
|
||||||
dbgln_if(SIGNAL_DEBUG, "Setting up user stack to return to EIP {:p}, ESP {:p}", ret_eip, old_esp);
|
dbgln_if(SIGNAL_DEBUG, "Setting up user stack to return to EIP {:p}, ESP {:p}", ret_eip, old_esp);
|
||||||
#elif ARCH(X86_64)
|
#elif ARCH(X86_64)
|
||||||
FlatPtr* stack = &state.userspace_rsp;
|
FlatPtr stack = state.userspace_rsp;
|
||||||
FlatPtr old_rsp = *stack;
|
FlatPtr old_rsp = stack;
|
||||||
FlatPtr ret_rip = state.rip;
|
FlatPtr ret_rip = state.rip;
|
||||||
FlatPtr ret_rflags = state.rflags;
|
FlatPtr ret_rflags = state.rflags;
|
||||||
|
|
||||||
|
@ -967,8 +967,8 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
|
||||||
// so we need to account for this here.
|
// so we need to account for this here.
|
||||||
// 56 % 16 = 8, so we only need to take 8 bytes into consideration for
|
// 56 % 16 = 8, so we only need to take 8 bytes into consideration for
|
||||||
// the stack alignment.
|
// the stack alignment.
|
||||||
FlatPtr stack_alignment = (*stack - 8) % 16;
|
FlatPtr stack_alignment = (stack - 8) % 16;
|
||||||
*stack -= stack_alignment;
|
stack -= stack_alignment;
|
||||||
|
|
||||||
push_value_on_user_stack(stack, ret_eflags);
|
push_value_on_user_stack(stack, ret_eflags);
|
||||||
|
|
||||||
|
@ -988,8 +988,8 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
|
||||||
// 22 % 2 = 0, so we dont need to take anything into consideration
|
// 22 % 2 = 0, so we dont need to take anything into consideration
|
||||||
// for the alignment.
|
// for the alignment.
|
||||||
// We also are not allowed to touch the thread's red-zone of 128 bytes
|
// We also are not allowed to touch the thread's red-zone of 128 bytes
|
||||||
FlatPtr stack_alignment = *stack % 16;
|
FlatPtr stack_alignment = stack % 16;
|
||||||
*stack -= 128 + stack_alignment;
|
stack -= 128 + stack_alignment;
|
||||||
|
|
||||||
push_value_on_user_stack(stack, ret_rflags);
|
push_value_on_user_stack(stack, ret_rflags);
|
||||||
|
|
||||||
|
@ -1019,7 +1019,15 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
|
||||||
push_value_on_user_stack(stack, handler_vaddr.get());
|
push_value_on_user_stack(stack, handler_vaddr.get());
|
||||||
push_value_on_user_stack(stack, 0); // push fake return address
|
push_value_on_user_stack(stack, 0); // push fake return address
|
||||||
|
|
||||||
VERIFY((*stack % 16) == 0);
|
// We write back the adjusted stack value into the register state.
|
||||||
|
// We have to do this because we can't just pass around a reference to a packed field, as it's UB.
|
||||||
|
#if ARCH(I386)
|
||||||
|
state.userspace_esp = stack;
|
||||||
|
#else
|
||||||
|
state.userspace_rsp = stack;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
VERIFY((stack % 16) == 0);
|
||||||
};
|
};
|
||||||
|
|
||||||
// We now place the thread state on the userspace stack.
|
// We now place the thread state on the userspace stack.
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue