
Kernel: Propagate overflow errors from Memory::page_round_up

Fixes #11402.
Guilherme Goncalves, 2021-12-24 11:22:11 -03:00; committed by Andreas Kling
parent 11599a3342
commit 33b78915d3
31 changed files with 112 additions and 100 deletions
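
The change in a nutshell: page_round_up() used to VERIFY that rounding would not wrap past the top of the address space, turning an out-of-range size into a kernel panic. It now returns ErrorOr<FlatPtr>, so call sites either propagate failure with TRY(...) or, where propagation is not yet plumbed through, assert success via release_value_but_fixme_should_propagate_errors(). A minimal standalone sketch of the call-site pattern, using a simplified stand-in for AK::ErrorOr (SimpleErrorOr and try_map_buffer are illustrative names, not SerenityOS API):

#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <optional>

// Simplified stand-in for AK::ErrorOr<T> (illustrative only): either a value or an errno code.
template<typename T>
struct SimpleErrorOr {
    std::optional<T> value;
    int error = 0;
    bool is_error() const { return !value.has_value(); }
    T release_value() { return *value; }
};

using FlatPtr = uintptr_t;
constexpr FlatPtr PAGE_SIZE = 4096;

// Same shape as the new Kernel::Memory::page_round_up(): report EINVAL instead of wrapping to 0.
SimpleErrorOr<FlatPtr> page_round_up(FlatPtr x)
{
    if (x > (UINTPTR_MAX & ~FlatPtr(0xFFF)))
        return { std::nullopt, EINVAL };
    return { { (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1) }, 0 };
}

// What TRY() expands to, morally: early-return the error, otherwise unwrap the value.
SimpleErrorOr<FlatPtr> try_map_buffer(FlatPtr size) // hypothetical caller
{
    auto rounded = page_round_up(size);
    if (rounded.is_error())
        return rounded; // TRY(...) propagates the error to our caller
    FlatPtr usable = rounded.release_value();
    return { { usable }, 0 };
}

int main()
{
    printf("%#zx\n", (size_t)try_map_buffer(5000).release_value()); // 0x2000
    printf("wraps? %d\n", page_round_up(UINTPTR_MAX).is_error());   // 1
}
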


@@ -131,7 +131,7 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
 ErrorOr<VirtualRange> AddressSpace::try_allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
-    size = page_round_up(size);
+    size = TRY(page_round_up(size));
     if (vaddr.is_null())
         return page_directory().range_allocator().try_allocate_anywhere(size, alignment);
     return page_directory().range_allocator().try_allocate_specific(vaddr, size);
@@ -222,8 +222,8 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
     if (!found_region)
         return nullptr;
     auto& region = *found_region;
-    size_t size = page_round_up(range.size());
-    if (region->size() != size)
+    auto rounded_range_size = page_round_up(range.size());
+    if (rounded_range_size.is_error() || region->size() != rounded_range_size.value())
         return nullptr;
     m_region_lookup_cache.range = range;
     m_region_lookup_cache.region = *region;


@@ -45,6 +45,14 @@ __attribute__((section(".super_pages"))) static u8 super_pages[4 * MiB];
 
 namespace Kernel::Memory {
 
+ErrorOr<FlatPtr> page_round_up(FlatPtr x)
+{
+    if (x > (explode_byte(0xFF) & ~0xFFF)) {
+        return Error::from_errno(EINVAL);
+    }
+    return (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
+}
+
 // NOTE: We can NOT use Singleton for this class, because
 // MemoryManager::initialize is called *before* global constructors are
 // run. If we do, then Singleton would get re-initialized, causing
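
Why explode_byte(0xFF) & ~0xFFF is the cutoff: explode_byte(0xFF) fills every byte of a FlatPtr with 0xFF, i.e. the all-ones value, and masking off the low twelve bits leaves the highest 4 KiB-aligned address. Anything above it wraps around zero when PAGE_SIZE - 1 is added. A compile-time check of that boundary, assuming a 64-bit FlatPtr and 4 KiB pages:

#include <cstdint>

using FlatPtr = uint64_t;
constexpr FlatPtr PAGE_SIZE = 4096;

// explode_byte(0xFF) on a 64-bit FlatPtr:
constexpr FlatPtr all_ones = 0xFFFF'FFFF'FFFF'FFFF;
// The guard value: the largest page-aligned address.
constexpr FlatPtr limit = all_ones & ~FlatPtr(0xFFF); // 0xFFFF'FFFF'FFFF'F000

// At the limit, rounding up is still exact (the value is already aligned):
static_assert(((limit + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) == limit);

// One byte past the limit, x + PAGE_SIZE - 1 wraps to 0 (unsigned arithmetic is modular),
// so the "rounded" result would be 0 rather than an address -- hence the EINVAL above.
static_assert((limit + 1) + PAGE_SIZE - 1 == 0);

int main() { }
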
@@ -137,7 +145,7 @@ void MemoryManager::unmap_text_after_init()
     SpinlockLocker mm_lock(s_mm_lock);
 
     auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
-    auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
+    auto end = page_round_up((FlatPtr)&end_of_unmap_after_init).release_value_but_fixme_should_propagate_errors();
 
     // Unmap the entire .unmap_after_init section
     for (auto i = start; i < end; i += PAGE_SIZE) {
@@ -155,7 +163,7 @@ UNMAP_AFTER_INIT void MemoryManager::protect_ksyms_after_init()
     SpinlockLocker page_lock(kernel_page_directory().get_lock());
 
     auto start = page_round_down((FlatPtr)start_of_kernel_ksyms);
-    auto end = page_round_up((FlatPtr)end_of_kernel_ksyms);
+    auto end = page_round_up((FlatPtr)end_of_kernel_ksyms).release_value_but_fixme_should_propagate_errors();
 
     for (auto i = start; i < end; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -213,7 +221,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
     // Register used memory regions that we know of.
     m_used_memory_ranges.ensure_capacity(4);
     m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
+    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image)).release_value_but_fixme_should_propagate_errors()) });
 
     if (multiboot_flags & 0x4) {
         auto* bootmods_start = multiboot_copy_boot_modules_array;
@@ -380,7 +388,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
 
     // Calculate how many bytes the array will consume
     auto physical_page_array_size = m_physical_page_entries_count * sizeof(PhysicalPageEntry);
-    auto physical_page_array_pages = page_round_up(physical_page_array_size) / PAGE_SIZE;
+    auto physical_page_array_pages = page_round_up(physical_page_array_size).release_value_but_fixme_should_propagate_errors() / PAGE_SIZE;
     VERIFY(physical_page_array_pages * PAGE_SIZE >= physical_page_array_size);
 
     // Calculate how many page tables we will need to be able to map them all
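
The release_value_but_fixme_should_propagate_errors() call sites above run during early boot or inside constructors, where there is no ErrorOr-returning caller to hand the failure to; the method asserts success and unwraps the value, and its deliberately awkward name doubles as a grep-able FIXME. A sketch of those semantics (an assumption based on how the sites above use it, not AK's actual implementation):

#include <cassert>
#include <optional>

// Illustrative stand-in; the real method lives on AK::ErrorOr.
template<typename T>
struct SketchErrorOr {
    std::optional<T> value;
    bool is_error() const { return !value.has_value(); }

    // On error, crash right here (the kernel's VERIFY panics) instead of
    // propagating -- tolerable at boot time, where failure is fatal anyway.
    T release_value_but_fixme_should_propagate_errors()
    {
        assert(!is_error());
        return *value;
    }
};

int main()
{
    SketchErrorOr<unsigned long> ok { 0x2000 };
    auto v = ok.release_value_but_fixme_should_propagate_errors(); // fine: 0x2000
    (void)v;
}
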
// Calculate how many page tables we will need to be able to map them all


@@ -26,16 +26,7 @@ struct KmallocGlobalData;
 
 namespace Kernel::Memory {
 
-constexpr bool page_round_up_would_wrap(FlatPtr x)
-{
-    return x > (explode_byte(0xFF) & ~0xFFF);
-}
-
-constexpr FlatPtr page_round_up(FlatPtr x)
-{
-    VERIFY(!page_round_up_would_wrap(x));
-    return (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
-}
+ErrorOr<FlatPtr> page_round_up(FlatPtr x);
 
 constexpr FlatPtr page_round_down(FlatPtr x)
 {
@@ -340,17 +331,11 @@ inline bool PhysicalPage::is_lazy_committed_page() const
 
 inline ErrorOr<Memory::VirtualRange> expand_range_to_page_boundaries(FlatPtr address, size_t size)
 {
-    if (Memory::page_round_up_would_wrap(size))
-        return EINVAL;
-
     if ((address + size) < address)
         return EINVAL;
 
-    if (Memory::page_round_up_would_wrap(address + size))
-        return EINVAL;
-
     auto base = VirtualAddress { address }.page_base();
-    auto end = Memory::page_round_up(address + size);
+    auto end = TRY(Memory::page_round_up(address + size));
 
     return Memory::VirtualRange { base, end - base.get() };
 }
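
With page_round_up() reporting overflow itself, the explicit page_round_up_would_wrap() pre-checks become redundant: TRY() surfaces the same EINVAL, and only the address + size wraparound check has to stay. A small worked example of the expansion arithmetic, assuming 4 KiB pages (the addresses are made up):

#include <cassert>
#include <cstdint>

constexpr uintptr_t PAGE_SIZE = 4096;

constexpr uintptr_t page_base(uintptr_t address) { return address & ~(PAGE_SIZE - 1); }
constexpr uintptr_t round_up(uintptr_t x) { return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); }

int main()
{
    // A 16-byte range starting mid-page...
    uintptr_t address = 0x1234;
    size_t size = 0x10;

    // ...expands to the full page containing it.
    auto base = page_base(address);      // 0x1000
    auto end = round_up(address + size); // 0x2000
    assert(base == 0x1000 && end == 0x2000);
    assert(end - base == PAGE_SIZE); // range size after expansion
}
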


@@ -11,7 +11,7 @@
 namespace Kernel::Memory {
 
 RingBuffer::RingBuffer(String region_name, size_t capacity)
-    : m_region(MM.allocate_contiguous_kernel_region(page_round_up(capacity), move(region_name), Region::Access::Read | Region::Access::Write).release_value())
+    : m_region(MM.allocate_contiguous_kernel_region(page_round_up(capacity).release_value_but_fixme_should_propagate_errors(), move(region_name), Region::Access::Read | Region::Access::Write).release_value())
     , m_capacity_in_bytes(capacity)
 {
 }


@@ -21,7 +21,7 @@ RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest&
 ScatterGatherList::ScatterGatherList(NonnullRefPtr<AnonymousVMObject> vm_object, AsyncBlockDeviceRequest& request, size_t device_block_size)
     : m_vm_object(move(vm_object))
 {
-    auto region_or_error = MM.allocate_kernel_region_with_vmobject(m_vm_object, page_round_up((request.block_count() * device_block_size)), "AHCI Scattered DMA", Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes);
+    auto region_or_error = MM.allocate_kernel_region_with_vmobject(m_vm_object, page_round_up((request.block_count() * device_block_size)).release_value_but_fixme_should_propagate_errors(), "AHCI Scattered DMA", Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes);
     if (region_or_error.is_error())
         TODO();
     m_dma_region = region_or_error.release_value();


@@ -27,7 +27,7 @@ template<typename T>
 static TypedMapping<T> map_typed(PhysicalAddress paddr, size_t length, Region::Access access = Region::Access::Read)
 {
     TypedMapping<T> table;
-    size_t mapping_length = page_round_up(paddr.offset_in_page() + length);
+    size_t mapping_length = page_round_up(paddr.offset_in_page() + length).release_value_but_fixme_should_propagate_errors();
     auto region_or_error = MM.allocate_kernel_region(paddr.page_base(), mapping_length, {}, access);
     if (region_or_error.is_error())
         TODO();


@@ -38,18 +38,11 @@ VirtualRange VirtualRange::intersect(VirtualRange const& other) const
 
 ErrorOr<VirtualRange> VirtualRange::expand_to_page_boundaries(FlatPtr address, size_t size)
 {
-    if (page_round_up_would_wrap(size))
-        return EINVAL;
-
     if ((address + size) < address)
         return EINVAL;
 
-    if (page_round_up_would_wrap(address + size))
-        return EINVAL;
-
     auto base = VirtualAddress { address }.page_base();
-    auto end = page_round_up(address + size);
+    auto end = TRY(page_round_up(address + size));
 
     return VirtualRange { base, end - base.get() };
 }