
Kernel: Assert if rounding-up-to-page-size would wrap around to 0

If we try to align a number above 0xfffff000 to the next multiple of
the page size (4 KiB), it would wrap around to 0. This is most likely
never what we want, so let's assert if that happens.
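
To make the failure mode concrete, the sketch below (ordinary standalone C++, not the kernel sources; it assumes FlatPtr is a 32-bit integer, as on the i686 build) shows the arithmetic the new assertion guards against:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    using FlatPtr = std::uint32_t;      // assumption: 32-bit addresses (i686 build)
    constexpr FlatPtr PAGE_SIZE = 4096;

    FlatPtr page_round_up(FlatPtr x)
    {
        FlatPtr rounded = (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
        // Any x in (0xfffff000, 0xffffffff] rounds up past 2^32 and wraps to 0.
        assert(x == 0 || rounded != 0);
        return rounded;
    }

    int main()
    {
        std::printf("%#x\n", (unsigned)page_round_up(0xfffff000u)); // 0xfffff000: already aligned
        std::printf("%#x\n", (unsigned)page_round_up(0xfffff001u)); // would round to 0 -> assertion fires
    }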
Andreas Kling 2021-02-14 09:57:19 +01:00
parent 198d641808
commit 09b1b09c19
19 changed files with 67 additions and 40 deletions

Kernel/VM/InodeVMObject.cpp

@@ -77,7 +77,7 @@ void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new
     InterruptDisabler disabler;
-    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
+    auto new_page_count = page_round_up(new_size) / PAGE_SIZE;
     m_physical_pages.resize(new_page_count);
     m_dirty_pages.grow(new_page_count, false);

Kernel/VM/MemoryManager.cpp

@@ -167,7 +167,7 @@ void MemoryManager::parse_memory_map()
     // Register used memory regions that we know of.
     m_used_memory_ranges.ensure_capacity(4);
     m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical(FlatPtr(&start_of_kernel_image))), PhysicalAddress(PAGE_ROUND_UP(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)))) });
+    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical(FlatPtr(&start_of_kernel_image))), PhysicalAddress(page_round_up(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)))) });
     if (multiboot_info_ptr->flags & 0x4) {
         auto* bootmods_start = multiboot_copy_boot_modules_array;

Kernel/VM/MemoryManager.h

@@ -39,8 +39,23 @@
 namespace Kernel {
-#define PAGE_ROUND_UP(x) ((((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
-#define PAGE_ROUND_DOWN(x) (((FlatPtr)(x)) & ~(PAGE_SIZE - 1))
+constexpr bool page_round_up_would_wrap(FlatPtr x)
+{
+    return x > 0xfffff000u;
+}
+
+constexpr FlatPtr page_round_up(FlatPtr x)
+{
+    FlatPtr rounded = (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
+    // Rounding up anything above 0xfffff000 wraps back to 0. That's never what we want.
+    ASSERT(x == 0 || rounded != 0);
+    return rounded;
+}
+
+constexpr FlatPtr page_round_down(FlatPtr x)
+{
+    return ((FlatPtr)(x)) & ~(PAGE_SIZE - 1);
+}
 inline u32 low_physical_to_virtual(u32 physical)
 {

Kernel/VM/Region.cpp

@@ -154,8 +154,8 @@ bool Region::is_volatile(VirtualAddress vaddr, size_t size) const
         return false;
     auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);
-    size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
-    size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
+    size_t first_page_index = page_round_down(offset_in_vmobject) / PAGE_SIZE;
+    size_t last_page_index = page_round_up(offset_in_vmobject + size) / PAGE_SIZE;
     return is_volatile_range({ first_page_index, last_page_index - first_page_index });
 }
@@ -171,16 +171,16 @@ auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, b
         // partial pages volatile to prevent potentially non-volatile
         // data from being discarded. So round up the first page and round
         // down the last page.
-        size_t first_page_index = PAGE_ROUND_UP(offset_in_vmobject) / PAGE_SIZE;
-        size_t last_page_index = PAGE_ROUND_DOWN(offset_in_vmobject + size) / PAGE_SIZE;
+        size_t first_page_index = page_round_up(offset_in_vmobject) / PAGE_SIZE;
+        size_t last_page_index = page_round_down(offset_in_vmobject + size) / PAGE_SIZE;
         if (first_page_index != last_page_index)
             add_volatile_range({ first_page_index, last_page_index - first_page_index });
     } else {
         // If marking pages as non-volatile, round down the first page
         // and round up the last page to make sure the beginning and
         // end of the range don't inadvertently get discarded.
-        size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
-        size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
+        size_t first_page_index = page_round_down(offset_in_vmobject) / PAGE_SIZE;
+        size_t last_page_index = page_round_up(offset_in_vmobject + size) / PAGE_SIZE;
         switch (remove_volatile_range({ first_page_index, last_page_index - first_page_index }, was_purged)) {
         case PurgeablePageRanges::RemoveVolatileError::Success:
         case PurgeablePageRanges::RemoveVolatileError::SuccessNoChange:

Kernel/VM/Space.cpp

@@ -57,7 +57,7 @@ Space::~Space()
 Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
-    size = PAGE_ROUND_UP(size);
+    size = page_round_up(size);
     if (vaddr.is_null())
         return page_directory().range_allocator().allocate_anywhere(size, alignment);
     return page_directory().range_allocator().allocate_specific(vaddr, size);
@@ -137,7 +137,7 @@ Region* Space::find_region_from_range(const Range& range)
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
         return m_region_lookup_cache.region.unsafe_ptr();
-    size_t size = PAGE_ROUND_UP(range.size());
+    size_t size = page_round_up(range.size());
     for (auto& region : m_regions) {
         if (region.vaddr() == range.base() && region.size() == size) {
             m_region_lookup_cache.range = range;

Kernel/VM/TypedMapping.h

@@ -47,7 +47,7 @@ template<typename T>
 static TypedMapping<T> map_typed(PhysicalAddress paddr, size_t length, u8 access = Region::Access::Read)
 {
     TypedMapping<T> table;
-    table.region = MM.allocate_kernel_region(paddr.page_base(), PAGE_ROUND_UP(length), {}, access);
+    table.region = MM.allocate_kernel_region(paddr.page_base(), page_round_up(length), {}, access);
     table.offset = paddr.offset_in_page();
     return table;
 }