diff --git a/Kernel/Arch/PC/BIOS.cpp b/Kernel/Arch/PC/BIOS.cpp
index 2bae4062d5..009df2df3f 100644
--- a/Kernel/Arch/PC/BIOS.cpp
+++ b/Kernel/Arch/PC/BIOS.cpp
@@ -36,7 +36,7 @@ MappedROM map_bios()
     MappedROM mapping;
     mapping.size = 128 * KiB;
     mapping.paddr = PhysicalAddress(0xe0000);
-    mapping.region = MM.allocate_kernel_region(mapping.paddr, PAGE_ROUND_UP(mapping.size), {}, Region::Access::Read);
+    mapping.region = MM.allocate_kernel_region(mapping.paddr, page_round_up(mapping.size), {}, Region::Access::Read);
     return mapping;
 }
 
@@ -49,7 +49,7 @@ MappedROM map_ebda()
     size_t ebda_size = *ebda_length_ptr;
 
     MappedROM mapping;
-    mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), PAGE_ROUND_UP(ebda_size), {}, Region::Access::Read);
+    mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), page_round_up(ebda_size), {}, Region::Access::Read);
     mapping.offset = ebda_paddr.offset_in_page();
     mapping.size = ebda_size;
     mapping.paddr = ebda_paddr;
diff --git a/Kernel/Devices/BXVGADevice.cpp b/Kernel/Devices/BXVGADevice.cpp
index 307f66d2d6..198f799976 100644
--- a/Kernel/Devices/BXVGADevice.cpp
+++ b/Kernel/Devices/BXVGADevice.cpp
@@ -179,7 +179,7 @@ KResultOr<Region*> BXVGADevice::mmap(Process& process, FileDescription&, const R
         return ENODEV;
     if (offset != 0)
         return ENXIO;
-    if (range.size() != PAGE_ROUND_UP(framebuffer_size_in_bytes()))
+    if (range.size() != page_round_up(framebuffer_size_in_bytes()))
         return EOVERFLOW;
 
     auto vmobject = AnonymousVMObject::create_for_physical_range(m_framebuffer_address, framebuffer_size_in_bytes());
diff --git a/Kernel/Devices/MBVGADevice.cpp b/Kernel/Devices/MBVGADevice.cpp
index 999108c554..19d440022c 100644
--- a/Kernel/Devices/MBVGADevice.cpp
+++ b/Kernel/Devices/MBVGADevice.cpp
@@ -58,7 +58,7 @@ KResultOr<Region*> MBVGADevice::mmap(Process& process, FileDescription&, const R
         return ENODEV;
     if (offset != 0)
         return ENXIO;
-    if (range.size() != PAGE_ROUND_UP(framebuffer_size_in_bytes()))
+    if (range.size() != page_round_up(framebuffer_size_in_bytes()))
         return EOVERFLOW;
 
     auto vmobject = AnonymousVMObject::create_for_physical_range(m_framebuffer_address, framebuffer_size_in_bytes());
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index a06fa8e582..36b7833f38 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -108,7 +108,7 @@ struct KmallocGlobalHeap {
             // was big enough to likely satisfy the request
             if (subheap.free_bytes() < allocation_request) {
                 // Looks like we probably need more
-                size_t memory_size = PAGE_ROUND_UP(decltype(m_global_heap.m_heap)::calculate_memory_for_bytes(allocation_request));
+                size_t memory_size = page_round_up(decltype(m_global_heap.m_heap)::calculate_memory_for_bytes(allocation_request));
                 // Add some more to the new heap. We're already using it for other
                 // allocations not including the original allocation_request
                 // that triggered heap expansion. If we don't allocate
diff --git a/Kernel/Interrupts/APIC.cpp b/Kernel/Interrupts/APIC.cpp
index a698e8535f..0350e7284e 100644
--- a/Kernel/Interrupts/APIC.cpp
+++ b/Kernel/Interrupts/APIC.cpp
@@ -309,7 +309,7 @@ void APIC::do_boot_aps()
     // Also account for the data appended to:
     // * aps_to_enable u32 values for ap_cpu_init_stacks
    // * aps_to_enable u32 values for ap_cpu_init_processor_info_array
-    auto apic_startup_region = MM.allocate_kernel_region_identity(PhysicalAddress(0x8000), PAGE_ROUND_UP(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))), {}, Region::Access::Read | Region::Access::Write | Region::Access::Execute);
+    auto apic_startup_region = MM.allocate_kernel_region_identity(PhysicalAddress(0x8000), page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))), {}, Region::Access::Read | Region::Access::Write | Region::Access::Execute);
     memcpy(apic_startup_region->vaddr().as_ptr(), reinterpret_cast<const void*>(apic_ap_start), apic_ap_start_size);
 
     // Allocate enough stacks for all APs
diff --git a/Kernel/KBuffer.h b/Kernel/KBuffer.h
index 031c154308..b65c2b1d24 100644
--- a/Kernel/KBuffer.h
+++ b/Kernel/KBuffer.h
@@ -50,7 +50,7 @@ class KBufferImpl : public RefCounted<KBufferImpl> {
 public:
     static RefPtr<KBufferImpl> try_create_with_size(size_t size, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), name, access, strategy);
+        auto region = MM.allocate_kernel_region(page_round_up(size), name, access, strategy);
         if (!region)
             return nullptr;
         return adopt(*new KBufferImpl(region.release_nonnull(), size, strategy));
@@ -58,7 +58,7 @@ public:
 
     static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(bytes.size()), name, access, strategy);
+        auto region = MM.allocate_kernel_region(page_round_up(bytes.size()), name, access, strategy);
         if (!region)
             return nullptr;
         memcpy(region->vaddr().as_ptr(), bytes.data(), bytes.size());
@@ -81,7 +81,7 @@ public:
 
     bool expand(size_t new_capacity)
     {
-        auto new_region = MM.allocate_kernel_region(PAGE_ROUND_UP(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
+        auto new_region = MM.allocate_kernel_region(page_round_up(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
         if (!new_region)
             return false;
         if (m_region && m_size > 0)
diff --git a/Kernel/KBufferBuilder.cpp b/Kernel/KBufferBuilder.cpp
index 07f467fc3c..1c8a012dac 100644
--- a/Kernel/KBufferBuilder.cpp
+++ b/Kernel/KBufferBuilder.cpp
@@ -42,7 +42,7 @@ inline bool KBufferBuilder::check_expand(size_t size)
     size_t new_buffer_size = m_size + size;
     if (Checked<size_t>::addition_would_overflow(new_buffer_size, 1 * MiB))
         return false;
-    new_buffer_size = PAGE_ROUND_UP(new_buffer_size + 1 * MiB);
+    new_buffer_size = page_round_up(new_buffer_size + 1 * MiB);
     return m_buffer->expand(new_buffer_size);
 }
 
diff --git a/Kernel/Net/E1000NetworkAdapter.cpp b/Kernel/Net/E1000NetworkAdapter.cpp
index a76e2c8007..7ffb13dbff 100644
--- a/Kernel/Net/E1000NetworkAdapter.cpp
+++ b/Kernel/Net/E1000NetworkAdapter.cpp
@@ -196,8 +196,8 @@ void E1000NetworkAdapter::detect()
 E1000NetworkAdapter::E1000NetworkAdapter(PCI::Address address, u8 irq)
     : PCI::Device(address, irq)
     , m_io_base(PCI::get_BAR1(pci_address()) & ~1)
-    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16), "E1000 RX", Region::Access::Read | Region::Access::Write))
-    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16), "E1000 TX", Region::Access::Read | Region::Access::Write))
+    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(page_round_up(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16), "E1000 RX", Region::Access::Read | Region::Access::Write))
+    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(page_round_up(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16), "E1000 TX", Region::Access::Read | Region::Access::Write))
 {
     set_interface_name("e1k");
 
@@ -206,7 +206,7 @@ E1000NetworkAdapter::E1000NetworkAdapter(PCI::Address address, u8 irq)
     enable_bus_mastering(pci_address());
 
     size_t mmio_base_size = PCI::get_BAR_space_size(pci_address(), 0);
-    m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), PAGE_ROUND_UP(mmio_base_size), "E1000 MMIO", Region::Access::Read | Region::Access::Write, Region::Cacheable::No);
+    m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), page_round_up(mmio_base_size), "E1000 MMIO", Region::Access::Read | Region::Access::Write, Region::Cacheable::No);
     m_mmio_base = m_mmio_region->vaddr();
     m_use_mmio = true;
     m_interrupt_line = PCI::get_interrupt_line(pci_address());
diff --git a/Kernel/Net/RTL8139NetworkAdapter.cpp b/Kernel/Net/RTL8139NetworkAdapter.cpp
index d74d746ed8..a3fb64e393 100644
--- a/Kernel/Net/RTL8139NetworkAdapter.cpp
+++ b/Kernel/Net/RTL8139NetworkAdapter.cpp
@@ -141,8 +141,8 @@ void RTL8139NetworkAdapter::detect()
 RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address address, u8 irq)
     : PCI::Device(address, irq)
     , m_io_base(PCI::get_BAR0(pci_address()) & ~1)
-    , m_rx_buffer(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(RX_BUFFER_SIZE + PACKET_SIZE_MAX), "RTL8139 RX", Region::Access::Read | Region::Access::Write))
-    , m_packet_buffer(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(PACKET_SIZE_MAX), "RTL8139 Packet buffer", Region::Access::Read | Region::Access::Write))
+    , m_rx_buffer(MM.allocate_contiguous_kernel_region(page_round_up(RX_BUFFER_SIZE + PACKET_SIZE_MAX), "RTL8139 RX", Region::Access::Read | Region::Access::Write))
+    , m_packet_buffer(MM.allocate_contiguous_kernel_region(page_round_up(PACKET_SIZE_MAX), "RTL8139 Packet buffer", Region::Access::Read | Region::Access::Write))
 {
     m_tx_buffers.ensure_capacity(RTL8139_TX_BUFFER_COUNT);
     set_interface_name("rtl8139");
@@ -161,7 +161,7 @@ RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address address, u8 irq)
     klog() << "RTL8139: RX buffer: " << m_rx_buffer->physical_page(0)->paddr();
 
     for (int i = 0; i < RTL8139_TX_BUFFER_COUNT; i++) {
-        m_tx_buffers.append(MM.allocate_contiguous_kernel_region(PAGE_ROUND_UP(TX_BUFFER_SIZE), "RTL8139 TX", Region::Access::Write | Region::Access::Read));
+        m_tx_buffers.append(MM.allocate_contiguous_kernel_region(page_round_up(TX_BUFFER_SIZE), "RTL8139 TX", Region::Access::Write | Region::Access::Read));
         klog() << "RTL8139: TX buffer " << i << ": " << m_tx_buffers[i]->physical_page(0)->paddr();
     }
 
diff --git a/Kernel/PCI/MMIOAccess.cpp b/Kernel/PCI/MMIOAccess.cpp
index e848f8c359..8024ffc044 100644
--- a/Kernel/PCI/MMIOAccess.cpp
+++ b/Kernel/PCI/MMIOAccess.cpp
@@ -51,7 +51,7 @@ private:
 
 DeviceConfigurationSpaceMapping::DeviceConfigurationSpaceMapping(Address device_address, const MMIOSegment& mmio_segment)
     : m_device_address(device_address)
-    , m_mapped_region(MM.allocate_kernel_region(PAGE_ROUND_UP(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Region::Access::Read | Region::Access::Write).release_nonnull())
+    , m_mapped_region(MM.allocate_kernel_region(page_round_up(PCI_MMIO_CONFIG_SPACE_SIZE), "PCI MMIO Device Access", Region::Access::Read | Region::Access::Write).release_nonnull())
 {
     PhysicalAddress segment_lower_addr = mmio_segment.get_paddr();
     PhysicalAddress device_physical_mmio_space = segment_lower_addr.offset(
@@ -106,7 +106,7 @@ MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
     klog() << "PCI: MCFG, length - " << length << ", revision " << revision;
     checkup_region->unmap();
 
-    auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), PAGE_ROUND_UP(length) + PAGE_SIZE, "PCI Parsing MCFG", Region::Access::Read | Region::Access::Write);
+    auto mcfg_region = MM.allocate_kernel_region(p_mcfg.page_base(), page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Region::Access::Read | Region::Access::Write);
     auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region->vaddr().offset(p_mcfg.offset_in_page()).as_ptr();
     dbgln_if(PCI_DEBUG, "PCI: Checking MCFG @ {}, {}", VirtualAddress(&mcfg), PhysicalAddress(p_mcfg.get()));
 
diff --git a/Kernel/Storage/RamdiskController.cpp b/Kernel/Storage/RamdiskController.cpp
index 0841073f58..d0b800c1b2 100644
--- a/Kernel/Storage/RamdiskController.cpp
+++ b/Kernel/Storage/RamdiskController.cpp
@@ -68,7 +68,7 @@ RamdiskController::RamdiskController()
     size_t count = 0;
     for (auto used_memory_range : MemoryManager::the().used_memory_ranges()) {
         if (used_memory_range.type == UsedMemoryRangeType::BootModule) {
-            size_t length = PAGE_ROUND_UP(used_memory_range.end.get()) - used_memory_range.start.get();
+            size_t length = page_round_up(used_memory_range.end.get()) - used_memory_range.start.get();
             auto region = MemoryManager::the().allocate_kernel_region(used_memory_range.start, length, "Ramdisk", Region::Access::Read | Region::Access::Write);
             m_devices.append(RamdiskDevice::create(*this, move(region), 6, count));
             count++;
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 5ddc338303..033c8c1f0e 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -167,7 +167,7 @@ static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& pro
 
     size_t executable_size = inode.size();
 
-    auto region = MM.allocate_kernel_region_with_vmobject(*vmobject, PAGE_ROUND_UP(executable_size), "ELF memory range calculation", Region::Access::Read);
+    auto region = MM.allocate_kernel_region_with_vmobject(*vmobject, page_round_up(executable_size), "ELF memory range calculation", Region::Access::Read);
     if (!region) {
         dbgln("Could not allocate memory for ELF");
         return ENOMEM;
@@ -203,7 +203,7 @@ static KResultOr<FlatPtr> get_interpreter_load_offset(const Elf32_Ehdr& main_pro
     constexpr FlatPtr minimum_interpreter_load_offset_randomization_size = 10 * MiB;
 
     auto random_load_offset_in_range([](auto start, auto size) {
-        return PAGE_ROUND_DOWN(start + get_good_random<FlatPtr>() % size);
+        return page_round_down(start + get_good_random<FlatPtr>() % size);
     });
 
     if (main_program_header.e_type == ET_DYN) {
@@ -263,7 +263,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
 
     size_t executable_size = inode.size();
 
-    auto executable_region = MM.allocate_kernel_region_with_vmobject(*vmobject, PAGE_ROUND_UP(executable_size), "ELF loading", Region::Access::Read);
+    auto executable_region = MM.allocate_kernel_region_with_vmobject(*vmobject, page_round_up(executable_size), "ELF loading", Region::Access::Read);
     if (!executable_region) {
         dbgln("Could not allocate memory for ELF loading");
         return ENOMEM;
diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index 727535ed27..b380858300 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -160,7 +160,10 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
     if (alignment & ~PAGE_MASK)
         return (void*)-EINVAL;
 
-    if (!is_user_range(VirtualAddress(addr), PAGE_ROUND_UP(size)))
+    if (page_round_up_would_wrap(size))
+        return (void*)-EINVAL;
+
+    if (!is_user_range(VirtualAddress(addr), page_round_up(size)))
         return (void*)-EFAULT;
 
     String name;
@@ -204,7 +207,7 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
     Optional<Range> range;
 
     if (map_randomized) {
-        range = space().page_directory().range_allocator().allocate_randomized(PAGE_ROUND_UP(size), alignment);
+        range = space().page_directory().range_allocator().allocate_randomized(page_round_up(size), alignment);
     } else {
         range = space().allocate_range(VirtualAddress(addr), size, alignment);
         if (!range.has_value()) {
@@ -272,7 +275,10 @@ int Process::sys$mprotect(void* addr, size_t size, int prot)
         REQUIRE_PROMISE(prot_exec);
     }
 
-    Range range_to_mprotect = { VirtualAddress((FlatPtr)addr & PAGE_MASK), PAGE_ROUND_UP(size) };
+    if (page_round_up_would_wrap(size))
+        return -EINVAL;
+
+    Range range_to_mprotect = { VirtualAddress((FlatPtr)addr & PAGE_MASK), page_round_up(size) };
     if (!range_to_mprotect.size())
         return -EINVAL;
 
@@ -343,7 +349,10 @@ int Process::sys$madvise(void* address, size_t size, int advice)
 {
     REQUIRE_PROMISE(stdio);
 
-    Range range_to_madvise { VirtualAddress((FlatPtr)address & PAGE_MASK), PAGE_ROUND_UP(size) };
+    if (page_round_up_would_wrap(size))
+        return -EINVAL;
+
+    Range range_to_madvise { VirtualAddress((FlatPtr)address & PAGE_MASK), page_round_up(size) };
     if (!range_to_madvise.size())
         return -EINVAL;
 
@@ -415,7 +424,10 @@ int Process::sys$munmap(void* addr, size_t size)
     if (!size)
         return -EINVAL;
 
-    Range range_to_unmap { VirtualAddress(addr), PAGE_ROUND_UP(size) };
+    if (page_round_up_would_wrap(size))
+        return -EINVAL;
+
+    Range range_to_unmap { VirtualAddress(addr), page_round_up(size) };
 
     if (!is_user_range(range_to_unmap))
         return -EFAULT;
diff --git a/Kernel/VM/InodeVMObject.cpp b/Kernel/VM/InodeVMObject.cpp
index 7676fcad08..4f8799142e 100644
--- a/Kernel/VM/InodeVMObject.cpp
+++ b/Kernel/VM/InodeVMObject.cpp
@@ -77,7 +77,7 @@ void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new
 
     InterruptDisabler disabler;
 
-    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
+    auto new_page_count = page_round_up(new_size) / PAGE_SIZE;
     m_physical_pages.resize(new_page_count);
 
     m_dirty_pages.grow(new_page_count, false);
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 7e329ef4a6..be32f030bf 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -167,7 +167,7 @@ void MemoryManager::parse_memory_map()
     // Register used memory regions that we know of.
     m_used_memory_ranges.ensure_capacity(4);
     m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical(FlatPtr(&start_of_kernel_image))), PhysicalAddress(PAGE_ROUND_UP(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)))) });
+    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical(FlatPtr(&start_of_kernel_image))), PhysicalAddress(page_round_up(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)))) });
 
     if (multiboot_info_ptr->flags & 0x4) {
         auto* bootmods_start = multiboot_copy_boot_modules_array;
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 53a4cc03a5..b2eba06eae 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -39,8 +39,23 @@
 
 namespace Kernel {
 
-#define PAGE_ROUND_UP(x) ((((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
-#define PAGE_ROUND_DOWN(x) (((FlatPtr)(x)) & ~(PAGE_SIZE - 1))
+constexpr bool page_round_up_would_wrap(FlatPtr x)
+{
+    return x > 0xfffff000u;
+}
+
+constexpr FlatPtr page_round_up(FlatPtr x)
+{
+    FlatPtr rounded = (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
+    // Rounding up >0xfffff000 wraps back to 0. That's never what we want.
+    ASSERT(x == 0 || rounded != 0);
+    return rounded;
+}
+
+constexpr FlatPtr page_round_down(FlatPtr x)
+{
+    return ((FlatPtr)(x)) & ~(PAGE_SIZE - 1);
+}
 
 inline u32 low_physical_to_virtual(u32 physical)
 {
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 94bcb677c2..375acd1127 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -154,8 +154,8 @@ bool Region::is_volatile(VirtualAddress vaddr, size_t size) const
         return false;
 
     auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);
-    size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
-    size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
+    size_t first_page_index = page_round_down(offset_in_vmobject) / PAGE_SIZE;
+    size_t last_page_index = page_round_up(offset_in_vmobject + size) / PAGE_SIZE;
     return is_volatile_range({ first_page_index, last_page_index - first_page_index });
 }
 
@@ -171,16 +171,16 @@ auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, b
         // partial pages volatile to prevent potentially non-volatile
         // data to be discarded. So round up the first page and round
         // down the last page.
-        size_t first_page_index = PAGE_ROUND_UP(offset_in_vmobject) / PAGE_SIZE;
-        size_t last_page_index = PAGE_ROUND_DOWN(offset_in_vmobject + size) / PAGE_SIZE;
+        size_t first_page_index = page_round_up(offset_in_vmobject) / PAGE_SIZE;
+        size_t last_page_index = page_round_down(offset_in_vmobject + size) / PAGE_SIZE;
         if (first_page_index != last_page_index)
             add_volatile_range({ first_page_index, last_page_index - first_page_index });
     } else {
         // If marking pages as non-volatile, round down the first page
         // and round up the last page to make sure the beginning and
         // end of the range doesn't inadvertently get discarded.
-        size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
-        size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
+        size_t first_page_index = page_round_down(offset_in_vmobject) / PAGE_SIZE;
+        size_t last_page_index = page_round_up(offset_in_vmobject + size) / PAGE_SIZE;
         switch (remove_volatile_range({ first_page_index, last_page_index - first_page_index }, was_purged)) {
         case PurgeablePageRanges::RemoveVolatileError::Success:
         case PurgeablePageRanges::RemoveVolatileError::SuccessNoChange:
diff --git a/Kernel/VM/Space.cpp b/Kernel/VM/Space.cpp
index 5cb7e28d2e..bf429c240b 100644
--- a/Kernel/VM/Space.cpp
+++ b/Kernel/VM/Space.cpp
@@ -57,7 +57,7 @@ Space::~Space()
 Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
-    size = PAGE_ROUND_UP(size);
+    size = page_round_up(size);
     if (vaddr.is_null())
         return page_directory().range_allocator().allocate_anywhere(size, alignment);
     return page_directory().range_allocator().allocate_specific(vaddr, size);
@@ -137,7 +137,7 @@ Region* Space::find_region_from_range(const Range& range)
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
         return m_region_lookup_cache.region.unsafe_ptr();
 
-    size_t size = PAGE_ROUND_UP(range.size());
+    size_t size = page_round_up(range.size());
     for (auto& region : m_regions) {
         if (region.vaddr() == range.base() && region.size() == size) {
             m_region_lookup_cache.range = range;
diff --git a/Kernel/VM/TypedMapping.h b/Kernel/VM/TypedMapping.h
index c0f8cf98b1..e24976537c 100644
--- a/Kernel/VM/TypedMapping.h
+++ b/Kernel/VM/TypedMapping.h
@@ -47,7 +47,7 @@ template<typename T>
 static TypedMapping<T> map_typed(PhysicalAddress paddr, size_t length, u8 access = Region::Access::Read)
 {
     TypedMapping<T> table;
-    table.region = MM.allocate_kernel_region(paddr.page_base(), PAGE_ROUND_UP(length), {}, access);
+    table.region = MM.allocate_kernel_region(paddr.page_base(), page_round_up(length), {}, access);
     table.offset = paddr.offset_in_page();
     return table;
 }
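
A note on the wrap-around hazard the new helpers close off: with a 32-bit FlatPtr, any value above 0xfffff000 has no page-aligned successor, so the old PAGE_ROUND_UP macro silently wrapped such sizes to 0. A hostile length like 0xffffffff passed to sys$mmap() therefore became a zero-sized range that sailed past the is_user_range() check. The following standalone sketch (not part of the patch) reproduces the failure mode in userspace, substituting uint32_t for the kernel's FlatPtr, a local 4 KiB PAGE_SIZE, and printf for the kernel's error paths:

    // build: c++ -std=c++17 page_round_demo.cpp -o page_round_demo
    #include <cstdint>
    #include <cstdio>

    static constexpr uint32_t PAGE_SIZE = 4096; // assumption: 4 KiB pages, as on x86

    // Mirrors the new kernel helper: values above 0xfffff000 cannot be
    // rounded up to a page boundary within 32 bits.
    constexpr bool page_round_up_would_wrap(uint32_t x)
    {
        return x > 0xfffff000u;
    }

    constexpr uint32_t page_round_up(uint32_t x)
    {
        // Same arithmetic as the old PAGE_ROUND_UP macro; the unsigned
        // addition wraps to 0 for any x > 0xfffff000.
        return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    }

    int main()
    {
        // The failure mode the patch closes off: 0xffffffff "rounds up" to 0.
        static_assert(page_round_up(0xffffffffu) == 0, "wraps to zero");

        // The pattern sys$mmap/mprotect/madvise/munmap now use: reject first.
        uint32_t untrusted_size = 0xffffffffu;
        if (page_round_up_would_wrap(untrusted_size)) {
            printf("EINVAL: size 0x%x would wrap when rounded up\n", untrusted_size);
            return 1;
        }
        printf("rounded: 0x%x\n", page_round_up(untrusted_size));
        return 0;
    }

Because the syscall paths reject a wrapping size with -EINVAL before ever calling page_round_up(), the ASSERT inside page_round_up() remains a backstop for kernel-internal callers rather than a user-triggerable panic.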
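
The asymmetric rounding in the Region.cpp hunk is worth spelling out as well: marking pages volatile shrinks the range inward (first page rounded up, last page rounded down) so a partially covered page is never discarded, while marking pages non-volatile grows the range outward so every touched page is protected. A small sketch with hypothetical offsets, under the same standalone assumptions as above:

    // build: c++ -std=c++17 volatile_rounding_demo.cpp -o volatile_rounding_demo
    #include <cstdint>
    #include <cstdio>

    static constexpr uint32_t PAGE_SIZE = 4096; // assumption: 4 KiB pages

    constexpr uint32_t page_round_up(uint32_t x) { return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); }
    constexpr uint32_t page_round_down(uint32_t x) { return x & ~(PAGE_SIZE - 1); }

    int main()
    {
        // Hypothetical byte range inside a VM object: [0x1800, 0x3800).
        uint32_t offset = 0x1800;
        uint32_t size = 0x2000;

        // Marking volatile: shrink inward, so only pages lying entirely
        // inside the range become discardable -- here just page 2, i.e. [2, 3).
        uint32_t first_volatile = page_round_up(offset) / PAGE_SIZE;
        uint32_t last_volatile = page_round_down(offset + size) / PAGE_SIZE;

        // Marking non-volatile: grow outward, so every page the range
        // touches is kept -- here pages 1 through 3, i.e. [1, 4).
        uint32_t first_kept = page_round_down(offset) / PAGE_SIZE;
        uint32_t last_kept = page_round_up(offset + size) / PAGE_SIZE;

        printf("volatile pages:     [%u, %u)\n", first_volatile, last_volatile);
        printf("non-volatile pages: [%u, %u)\n", first_kept, last_kept);
        return 0;
    }

This also matches the guard in set_volatile(): if the inward rounding leaves first_page_index equal to last_page_index, the range covers no full page and nothing is marked volatile.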