From 4bfd6e41b952c2583c6941e58dea3e5cc032b9f3 Mon Sep 17 00:00:00 2001
From: sin-ack
Date: Sun, 15 Aug 2021 09:07:59 +0000
Subject: [PATCH] Kernel: Make Kernel::VMObject allocation functions return
 KResultOr

This makes for nicer handling of errors compared to checking whether a
RefPtr is null. Additionally, this paves the way for returning different
kinds of errors in the future.
---
 Kernel/Bus/USB/UHCIController.cpp             | 27 ++++++---
 Kernel/Bus/USB/UHCIController.h               |  2 +-
 Kernel/Devices/KCOVInstance.cpp               |  7 ++-
 Kernel/Devices/MemoryDevice.cpp               |  9 +--
 Kernel/Devices/SB16.cpp                       |  9 +--
 Kernel/Graphics/Bochs/GraphicsAdapter.cpp     |  3 +-
 Kernel/Graphics/FramebufferDevice.cpp         | 49 ++++++++++------
 Kernel/Graphics/FramebufferDevice.h           |  2 +-
 .../Graphics/Intel/NativeGraphicsAdapter.cpp  |  3 +-
 Kernel/Graphics/VGACompatibleAdapter.cpp      |  3 +-
 .../Graphics/VirtIOGPU/FrameBufferDevice.cpp  | 43 ++++++++++----
 Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h |  2 +-
 Kernel/Interrupts/APIC.cpp                    |  8 ++-
 Kernel/Memory/AddressSpace.cpp                |  8 +--
 Kernel/Memory/AnonymousVMObject.cpp           | 57 +++++++++++--------
 Kernel/Memory/AnonymousVMObject.h             | 12 ++--
 Kernel/Memory/MemoryManager.cpp               | 19 ++++---
 Kernel/Memory/PrivateInodeVMObject.cpp        |  4 +-
 Kernel/Memory/PrivateInodeVMObject.h          |  2 +-
 Kernel/Memory/Region.cpp                      |  6 +-
 Kernel/Memory/ScatterGatherList.cpp           |  8 ++-
 Kernel/Memory/SharedInodeVMObject.cpp         |  4 +-
 Kernel/Memory/SharedInodeVMObject.h           |  2 +-
 Kernel/Memory/VMObject.h                      |  2 +-
 Kernel/Syscalls/anon_create.cpp               |  8 +--
 Kernel/Syscalls/mmap.cpp                      | 17 ++++--
 26 files changed, 194 insertions(+), 122 deletions(-)

diff --git a/Kernel/Bus/USB/UHCIController.cpp b/Kernel/Bus/USB/UHCIController.cpp
index 63e668a92f..8a1f6c1fab 100644
--- a/Kernel/Bus/USB/UHCIController.cpp
+++ b/Kernel/Bus/USB/UHCIController.cpp
@@ -119,13 +119,17 @@ KResult UHCIController::reset()
     }
 
     // Let's allocate the physical page for the Frame List (which is 4KiB aligned)
-    auto framelist_vmobj = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
-    m_framelist = MM.allocate_kernel_region_with_vmobject(*framelist_vmobj, PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write);
+    auto maybe_framelist_vmobj = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(PAGE_SIZE);
+    if (maybe_framelist_vmobj.is_error())
+        return maybe_framelist_vmobj.error();
+
+    m_framelist = MM.allocate_kernel_region_with_vmobject(maybe_framelist_vmobj.release_value(), PAGE_SIZE, "UHCI Framelist", Memory::Region::Access::Write);
     dbgln("UHCI: Allocated framelist at physical address {}", m_framelist->physical_page(0)->paddr());
     dbgln("UHCI: Framelist is at virtual address {}", m_framelist->vaddr());
     write_sofmod(64); // 1mS frame time
-    create_structures();
+    if (auto result = create_structures(); result.is_error())
+        return result;
 
     setup_schedule();
 
     write_flbaseadd(m_framelist->physical_page(0)->paddr().get()); // Frame list (physical) address
@@ -139,12 +143,15 @@ KResult UHCIController::reset()
     return KSuccess;
 }
 
-UNMAP_AFTER_INIT void UHCIController::create_structures()
+UNMAP_AFTER_INIT KResult UHCIController::create_structures()
 {
     // Let's allocate memory for both the QH and TD pools
     // First the QH pool and all of the Interrupt QH's
-    auto qh_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
-    m_qh_pool = MM.allocate_kernel_region_with_vmobject(*qh_pool_vmobject, 2 * PAGE_SIZE, "UHCI Queue Head Pool", Memory::Region::Access::Write);
+    auto maybe_qh_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
+    if (maybe_qh_pool_vmobject.is_error())
+        return maybe_qh_pool_vmobject.error();
+
+    m_qh_pool = MM.allocate_kernel_region_with_vmobject(maybe_qh_pool_vmobject.release_value(), 2 * PAGE_SIZE, "UHCI Queue Head Pool", Memory::Region::Access::Write);
     memset(m_qh_pool->vaddr().as_ptr(), 0, 2 * PAGE_SIZE); // Zero out both pages
 
     // Let's populate our free qh list (so we have some we can allocate later on)
@@ -163,8 +170,10 @@ UNMAP_AFTER_INIT void UHCIController::create_structures()
     m_dummy_qh = allocate_queue_head();
 
     // Now the Transfer Descriptor pool
-    auto td_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
-    m_td_pool = MM.allocate_kernel_region_with_vmobject(*td_pool_vmobject, 2 * PAGE_SIZE, "UHCI Transfer Descriptor Pool", Memory::Region::Access::Write);
+    auto maybe_td_pool_vmobject = Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(2 * PAGE_SIZE);
+    if (maybe_td_pool_vmobject.is_error())
+        return maybe_td_pool_vmobject.error();
+    m_td_pool = MM.allocate_kernel_region_with_vmobject(maybe_td_pool_vmobject.release_value(), 2 * PAGE_SIZE, "UHCI Transfer Descriptor Pool", Memory::Region::Access::Write);
     memset(m_td_pool->vaddr().as_ptr(), 0, 2 * PAGE_SIZE);
 
     // Set up the Isochronous Transfer Descriptor list
@@ -209,6 +218,8 @@ UNMAP_AFTER_INIT void UHCIController::create_structures()
         dbgln(" qh_pool: {}, length: {}", PhysicalAddress(m_qh_pool->physical_page(0)->paddr()), m_qh_pool->range().size());
         dbgln(" td_pool: {}, length: {}", PhysicalAddress(m_td_pool->physical_page(0)->paddr()), m_td_pool->range().size());
     }
+
+    return KSuccess;
 }
 
 UNMAP_AFTER_INIT void UHCIController::setup_schedule()
diff --git a/Kernel/Bus/USB/UHCIController.h b/Kernel/Bus/USB/UHCIController.h
index 88125482e7..c156611a09 100644
--- a/Kernel/Bus/USB/UHCIController.h
+++ b/Kernel/Bus/USB/UHCIController.h
@@ -69,7 +69,7 @@ private:
 
     virtual bool handle_irq(const RegisterState&) override;
 
-    void create_structures();
+    KResult create_structures();
     void setup_schedule();
     size_t poll_transfer_queue(QueueHead& transfer_queue);
diff --git a/Kernel/Devices/KCOVInstance.cpp b/Kernel/Devices/KCOVInstance.cpp
index 389097797e..e4613b6588 100644
--- a/Kernel/Devices/KCOVInstance.cpp
+++ b/Kernel/Devices/KCOVInstance.cpp
@@ -28,10 +28,11 @@ KResult KCOVInstance::buffer_allocate(size_t buffer_size_in_entries)
     // - we allocate one kernel region using that vmobject
     // - when an mmap call comes in, we allocate another userspace region,
     //   backed by the same vmobject
-    this->vmobject = Memory::AnonymousVMObject::try_create_with_size(
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_with_size(
         this->m_buffer_size_in_bytes, AllocationStrategy::AllocateNow);
-    if (!this->vmobject)
-        return ENOMEM;
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
+    this->vmobject = maybe_vmobject.release_value();
     this->m_kernel_region = MM.allocate_kernel_region_with_vmobject(
         *this->vmobject, this->m_buffer_size_in_bytes,
         String::formatted("kcov_{}", this->m_pid),
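Every call site converted by this patch follows the same shape: ask the factory for a KResultOr, bail out with .error() if it failed, and take ownership of the value with .release_value() otherwise. A self-contained sketch of the idea, using a toy ErrorOr in place of the kernel's KResultOr (all names here are illustrative, not the kernel's implementation):

    #include <cassert>
    #include <cstdio>
    #include <utility>
    #include <variant>

    // Toy stand-in for Kernel::KResultOr<T>: holds either an errno-style
    // code or a value, never both.
    template<typename T>
    class ErrorOr {
    public:
        ErrorOr(int error)
            : m_value_or_error(error)
        {
        }
        ErrorOr(T value)
            : m_value_or_error(std::move(value))
        {
        }
        bool is_error() const { return std::holds_alternative<int>(m_value_or_error); }
        int error() const { return std::get<int>(m_value_or_error); }
        T release_value() { return std::move(std::get<T>(m_value_or_error)); }

    private:
        std::variant<int, T> m_value_or_error;
    };

    constexpr int ENOMEM_LIKE = 12;

    ErrorOr<int*> try_allocate(bool succeed)
    {
        if (!succeed)
            return ENOMEM_LIKE; // Error and value share one return type.
        return new int(42);
    }

    int caller(bool succeed)
    {
        // The call-site pattern used throughout the patch:
        auto maybe_value = try_allocate(succeed);
        if (maybe_value.is_error())
            return maybe_value.error(); // Propagate the specific error code.
        int* value = maybe_value.release_value();
        std::printf("got %d\n", *value);
        delete value;
        return 0;
    }

    int main()
    {
        assert(caller(true) == 0);
        assert(caller(false) == ENOMEM_LIKE);
    }

Compared with the old RefPtr-is-null convention, the caller learns *which* error occurred, not merely that allocation failed.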
diff --git a/Kernel/Devices/MemoryDevice.cpp b/Kernel/Devices/MemoryDevice.cpp
index 5650fd158e..09575d02e3 100644
--- a/Kernel/Devices/MemoryDevice.cpp
+++ b/Kernel/Devices/MemoryDevice.cpp
@@ -47,13 +47,14 @@ KResultOr<Memory::Region*> MemoryDevice::mmap(Process& process, FileDescription&
         return EINVAL;
     }
 
-    auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(viewed_address, range.size());
-    if (!vmobject)
-        return ENOMEM;
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(viewed_address, range.size());
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
+
     dbgln("MemoryDevice: Mapped physical memory at {} for range of {} bytes", viewed_address, range.size());
     return process.address_space().allocate_region_with_vmobject(
         range,
-        vmobject.release_nonnull(),
+        maybe_vmobject.release_value(),
         0,
         "Mapped Physical Memory",
         prot,
diff --git a/Kernel/Devices/SB16.cpp b/Kernel/Devices/SB16.cpp
index f3b7955dcd..e78360812f 100644
--- a/Kernel/Devices/SB16.cpp
+++ b/Kernel/Devices/SB16.cpp
@@ -238,10 +238,11 @@ KResultOr<size_t> SB16::write(FileDescription&, u64, const UserOrKernelBuffer& d
         if (!page)
             return ENOMEM;
         auto nonnull_page = page.release_nonnull();
-        auto vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages({ &nonnull_page, 1 });
-        if (!vmobject)
-            return ENOMEM;
-        m_dma_region = MM.allocate_kernel_region_with_vmobject(*vmobject, PAGE_SIZE, "SB16 DMA buffer", Memory::Region::Access::Write);
+        auto maybe_vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages({ &nonnull_page, 1 });
+        if (maybe_vmobject.is_error())
+            return maybe_vmobject.error();
+
+        m_dma_region = MM.allocate_kernel_region_with_vmobject(maybe_vmobject.release_value(), PAGE_SIZE, "SB16 DMA buffer", Memory::Region::Access::Write);
         if (!m_dma_region)
             return ENOMEM;
     }
diff --git a/Kernel/Graphics/Bochs/GraphicsAdapter.cpp b/Kernel/Graphics/Bochs/GraphicsAdapter.cpp
index baacaf12cf..01a5f70f84 100644
--- a/Kernel/Graphics/Bochs/GraphicsAdapter.cpp
+++ b/Kernel/Graphics/Bochs/GraphicsAdapter.cpp
@@ -88,7 +88,8 @@ UNMAP_AFTER_INIT void BochsGraphicsAdapter::initialize_framebuffer_devices()
 {
     // FIXME: Find a better way to determine default resolution...
     m_framebuffer_device = FramebufferDevice::create(*this, 0, PhysicalAddress(PCI::get_BAR0(pci_address()) & 0xfffffff0), 1024, 768, 1024 * sizeof(u32));
-    m_framebuffer_device->initialize();
+    // FIXME: Would be nice to be able to return a KResult here.
+    VERIFY(!m_framebuffer_device->initialize().is_error());
 }
 
 GraphicsDevice::Type BochsGraphicsAdapter::type() const
diff --git a/Kernel/Graphics/FramebufferDevice.cpp b/Kernel/Graphics/FramebufferDevice.cpp
index 74a7249d8b..ae7bece354 100644
--- a/Kernel/Graphics/FramebufferDevice.cpp
+++ b/Kernel/Graphics/FramebufferDevice.cpp
@@ -36,18 +36,20 @@ KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescrip
     if (range.size() != Memory::page_round_up(framebuffer_size_in_bytes()))
         return EOVERFLOW;
 
-    auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
-    if (!vmobject)
-        return ENOMEM;
-    m_userspace_real_framebuffer_vmobject = vmobject;
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
+    m_userspace_real_framebuffer_vmobject = maybe_vmobject.release_value();
 
-    m_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
-    if (!m_real_framebuffer_vmobject)
-        return ENOMEM;
+    auto maybe_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
+    if (maybe_real_framebuffer_vmobject.is_error())
+        return maybe_real_framebuffer_vmobject.error();
+    m_real_framebuffer_vmobject = maybe_real_framebuffer_vmobject.release_value();
 
-    m_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
-    if (!m_swapped_framebuffer_vmobject)
-        return ENOMEM;
+    auto maybe_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
+    if (maybe_swapped_framebuffer_vmobject.is_error())
+        return maybe_swapped_framebuffer_vmobject.error();
+    m_swapped_framebuffer_vmobject = maybe_swapped_framebuffer_vmobject.release_value();
 
     m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::ReadWrite);
     if (!m_real_framebuffer_region)
@@ -107,16 +109,29 @@ String FramebufferDevice::device_name() const
     return String::formatted("fb{}", minor());
 }
 
-UNMAP_AFTER_INIT void FramebufferDevice::initialize()
+UNMAP_AFTER_INIT KResult FramebufferDevice::initialize()
 {
-    m_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
-    VERIFY(m_real_framebuffer_vmobject);
+    // FIXME: Would be nice to be able to unify this with mmap above, but this
+    // function is UNMAP_AFTER_INIT for the time being.
+    auto maybe_real_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_size_in_bytes()));
+    if (maybe_real_framebuffer_vmobject.is_error())
+        return maybe_real_framebuffer_vmobject.error();
+    m_real_framebuffer_vmobject = maybe_real_framebuffer_vmobject.release_value();
+
+    auto maybe_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
+    if (maybe_swapped_framebuffer_vmobject.is_error())
+        return maybe_swapped_framebuffer_vmobject.error();
+    m_swapped_framebuffer_vmobject = maybe_swapped_framebuffer_vmobject.release_value();
+
     m_real_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer", Memory::Region::Access::ReadWrite);
-    VERIFY(m_real_framebuffer_region);
-    m_swapped_framebuffer_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_size_in_bytes()), AllocationStrategy::AllocateNow);
-    VERIFY(m_swapped_framebuffer_vmobject);
+    if (!m_real_framebuffer_region)
+        return ENOMEM;
+
     m_swapped_framebuffer_region = MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_size_in_bytes()), "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite);
-    VERIFY(m_swapped_framebuffer_region);
+    if (!m_swapped_framebuffer_region)
+        return ENOMEM;
+
+    return KSuccess;
 }
 
 UNMAP_AFTER_INIT FramebufferDevice::FramebufferDevice(const GraphicsDevice& adapter, size_t output_port_index, PhysicalAddress addr, size_t width, size_t height, size_t pitch)
diff --git a/Kernel/Graphics/FramebufferDevice.h b/Kernel/Graphics/FramebufferDevice.h
index 145b7c7010..c4bb086c2d 100644
--- a/Kernel/Graphics/FramebufferDevice.h
+++ b/Kernel/Graphics/FramebufferDevice.h
@@ -34,7 +34,7 @@ public:
     size_t framebuffer_size_in_bytes() const;
 
     virtual ~FramebufferDevice() {};
-    void initialize();
+    KResult initialize();
 
 private:
     // ^File
diff --git a/Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp b/Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp
index cd4cd43e89..dbab18641f 100644
--- a/Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp
+++ b/Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp
@@ -639,6 +639,7 @@ void IntelNativeGraphicsAdapter::initialize_framebuffer_devices()
     VERIFY(m_framebuffer_height != 0);
     VERIFY(m_framebuffer_width != 0);
     m_framebuffer_device = FramebufferDevice::create(*this, 0, address, m_framebuffer_width, m_framebuffer_height, m_framebuffer_pitch);
-    m_framebuffer_device->initialize();
+    // FIXME: Would be nice to be able to return a KResult here.
+    VERIFY(!m_framebuffer_device->initialize().is_error());
 }
 }
diff --git a/Kernel/Graphics/VGACompatibleAdapter.cpp b/Kernel/Graphics/VGACompatibleAdapter.cpp
index 1f30fc10fc..22dc7f07ed 100644
--- a/Kernel/Graphics/VGACompatibleAdapter.cpp
+++ b/Kernel/Graphics/VGACompatibleAdapter.cpp
@@ -33,7 +33,8 @@ UNMAP_AFTER_INIT void VGACompatibleAdapter::initialize_framebuffer_devices()
     VERIFY(m_framebuffer_height != 0);
     VERIFY(m_framebuffer_pitch != 0);
     m_framebuffer_device = FramebufferDevice::create(*this, 0, m_framebuffer_address, m_framebuffer_width, m_framebuffer_height, m_framebuffer_pitch);
-    m_framebuffer_device->initialize();
+    // FIXME: Would be nice to be able to return KResult here.
+    VERIFY(!m_framebuffer_device->initialize().is_error());
 }
 
 UNMAP_AFTER_INIT VGACompatibleAdapter::VGACompatibleAdapter(PCI::Address address)
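A recurring FIXME in the graphics adapters above: initialize() now reports failure, but its callers (the initialize_framebuffer_devices() functions) still return void, so for now the best they can do is assert success. A minimal standalone illustration of the two caller shapes, with a toy Result type and illustrative names:

    #include <cassert>

    // Toy stand-in for KResult: 0 means success, anything else is an
    // errno-style error code.
    struct Result {
        int code { 0 };
        bool is_error() const { return code != 0; }
    };

    Result initialize_device() { return {}; } // Pretend this can fail with ENOMEM.

    // A caller that can propagate simply forwards the error upward.
    Result init_and_propagate()
    {
        if (auto result = initialize_device(); result.is_error())
            return result;
        return {};
    }

    // A caller locked into a void return type can only assert, which is
    // exactly what the VERIFY(!...is_error()) calls above do.
    void init_or_die()
    {
        assert(!initialize_device().is_error());
    }

    int main()
    {
        assert(!init_and_propagate().is_error());
        init_or_die();
    }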
diff --git a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp
index b338c6528f..c488d41678 100644
--- a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp
+++ b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.cpp
@@ -15,15 +15,18 @@ FrameBufferDevice::FrameBufferDevice(GPU& virtio_gpu, ScanoutID scanout)
     , m_gpu(virtio_gpu)
     , m_scanout(scanout)
 {
-    if (display_info().enabled)
-        create_framebuffer();
+    if (display_info().enabled) {
+        // FIXME: This should be in a place where we can handle allocation failures.
+        auto result = create_framebuffer();
+        VERIFY(!result.is_error());
+    }
 }
 
 FrameBufferDevice::~FrameBufferDevice()
 {
 }
 
-void FrameBufferDevice::create_framebuffer()
+KResult FrameBufferDevice::create_framebuffer()
 {
     // First delete any existing framebuffers to free the memory first
     m_framebuffer = nullptr;
@@ -40,12 +43,17 @@ void FrameBufferDevice::create_framebuffer()
     for (auto i = 0u; i < num_needed_pages; ++i) {
         pages.append(write_sink_page);
     }
-    m_framebuffer_sink_vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span());
+    auto maybe_framebuffer_sink_vmobject = Memory::AnonymousVMObject::try_create_with_physical_pages(pages.span());
+    if (maybe_framebuffer_sink_vmobject.is_error())
+        return maybe_framebuffer_sink_vmobject.error();
+    m_framebuffer_sink_vmobject = maybe_framebuffer_sink_vmobject.release_value();
 
     MutexLocker locker(m_gpu.operation_lock());
     m_current_buffer = &buffer_from_index(m_last_set_buffer_index.load());
     create_buffer(m_main_buffer, 0, m_buffer_size);
     create_buffer(m_back_buffer, m_buffer_size, m_buffer_size);
+
+    return KSuccess;
 }
 
 void FrameBufferDevice::create_buffer(Buffer& buffer, size_t framebuffer_offset, size_t framebuffer_size)
@@ -124,7 +132,10 @@ bool FrameBufferDevice::try_to_set_resolution(size_t width, size_t height)
         .width = (u32)width,
         .height = (u32)height,
     };
-    create_framebuffer();
+
+    // FIXME: Would be nice to be able to return KResultOr here.
+    if (auto result = create_framebuffer(); result.is_error())
+        return false;
     return true;
 }
@@ -255,9 +266,18 @@ KResultOr<Memory::Region*> FrameBufferDevice::mmap(Process& process, FileDescrip
     if (m_userspace_mmap_region)
         return ENOMEM;
 
-    auto vmobject = m_are_writes_active ? m_framebuffer->vmobject().try_clone() : m_framebuffer_sink_vmobject;
-    if (vmobject.is_null())
-        return ENOMEM;
+    RefPtr<Memory::VMObject> vmobject;
+    if (m_are_writes_active) {
+        auto maybe_vmobject = m_framebuffer->vmobject().try_clone();
+        if (maybe_vmobject.is_error())
+            return maybe_vmobject.error();
+
+        vmobject = maybe_vmobject.release_value();
+    } else {
+        vmobject = m_framebuffer_sink_vmobject;
+        if (vmobject.is_null())
+            return ENOMEM;
+    }
 
     auto result = process.address_space().allocate_region_with_vmobject(
         range,
@@ -277,9 +297,10 @@ void FrameBufferDevice::deactivate_writes()
     m_are_writes_active = false;
     if (m_userspace_mmap_region) {
         auto* region = m_userspace_mmap_region.unsafe_ptr();
-        auto vm_object = m_framebuffer_sink_vmobject->try_clone();
-        VERIFY(vm_object);
-        region->set_vmobject(vm_object.release_nonnull());
+        auto maybe_vm_object = m_framebuffer_sink_vmobject->try_clone();
+        // FIXME: Would be nice to be able to return a KResult here.
+        VERIFY(!maybe_vm_object.is_error());
+        region->set_vmobject(maybe_vm_object.release_value());
         region->remap();
     }
     set_buffer(0);
diff --git a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h
index f0a2b81231..588d2f3ce1 100644
--- a/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h
+++ b/Kernel/Graphics/VirtIOGPU/FrameBufferDevice.h
@@ -56,7 +56,7 @@ private:
     Protocol::DisplayInfoResponse::Display const& display_info() const;
     Protocol::DisplayInfoResponse::Display& display_info();
 
-    void create_framebuffer();
+    KResult create_framebuffer();
     void create_buffer(Buffer&, size_t, size_t);
     void set_buffer(int);
diff --git a/Kernel/Interrupts/APIC.cpp b/Kernel/Interrupts/APIC.cpp
index 9d21d5daa7..db17743b6a 100644
--- a/Kernel/Interrupts/APIC.cpp
+++ b/Kernel/Interrupts/APIC.cpp
@@ -277,11 +277,13 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
 
 UNMAP_AFTER_INIT static NonnullOwnPtr<Memory::Region> create_identity_mapped_region(PhysicalAddress paddr, size_t size)
 {
-    auto vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size);
-    VERIFY(vmobject);
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size);
+    // FIXME: Would be nice to be able to return a KResultOr from here.
+    VERIFY(!maybe_vmobject.is_error());
+
     auto region = MM.allocate_kernel_region_with_vmobject(
         Memory::VirtualRange { VirtualAddress { static_cast<FlatPtr>(paddr.get()) }, size },
-        vmobject.release_nonnull(),
+        maybe_vmobject.release_value(),
         {}, Memory::Region::Access::ReadWriteExecute);
     VERIFY(region);
diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index 653160479f..57f0cea656 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -170,10 +170,10 @@ KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_
 KResultOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
 {
     VERIFY(range.is_valid());
-    auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
-    if (!vmobject)
-        return ENOMEM;
-    auto region = Region::try_create_user_accessible(range, vmobject.release_nonnull(), 0, KString::try_create(name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false);
+    auto maybe_vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
+    auto region = Region::try_create_user_accessible(range, maybe_vmobject.release_value(), 0, KString::try_create(name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false);
     if (!region)
         return ENOMEM;
     if (!region->map(page_directory()))
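The next file is the heart of the patch. Its try_clone() relies on committed physical pages: a clone commits enough pages up front so that every later copy-on-write fault is guaranteed a page, and any ENOMEM now surfaces at clone time instead of inside a page-fault handler. A toy model of that accounting, with illustrative names and numbers rather than the kernel's data structures:

    #include <cassert>
    #include <cstddef>
    #include <optional>

    // Toy commit pool: cloning reserves ("commits") pages ahead of time.
    struct CommitPool {
        size_t free_pages;

        std::optional<size_t> try_commit(size_t count)
        {
            if (count > free_pages)
                return std::nullopt; // The moment try_clone() would return ENOMEM.
            free_pages -= count;
            return count;
        }
    };

    int main()
    {
        CommitPool pool { 8 };

        // Cloning a 6-page object commits 6 pages for its future CoW faults.
        size_t object_pages = 6;
        assert(pool.try_commit(object_pages).has_value());
        assert(pool.free_pages == 2);

        // A second clone cannot be committed: it fails up front with ENOMEM
        // instead of failing later, in the middle of a page fault.
        assert(!pool.try_commit(object_pages).has_value());
    }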
diff --git a/Kernel/Memory/AnonymousVMObject.cpp b/Kernel/Memory/AnonymousVMObject.cpp
index d576be58e3..4d73523554 100644
--- a/Kernel/Memory/AnonymousVMObject.cpp
+++ b/Kernel/Memory/AnonymousVMObject.cpp
@@ -13,7 +13,7 @@
 
 namespace Kernel::Memory {
 
-RefPtr<VMObject> AnonymousVMObject::try_clone()
+KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
 {
     // We need to acquire our lock so we copy a sane state
     ScopedSpinLock lock(m_lock);
@@ -21,9 +21,11 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     if (is_purgeable() && is_volatile()) {
         // If this object is purgeable+volatile, create a new zero-filled purgeable+volatile
         // object, effectively "pre-purging" it in the child process.
-        auto clone = try_create_purgeable_with_size(size(), AllocationStrategy::None);
-        if (!clone)
-            return {};
+        auto maybe_clone = try_create_purgeable_with_size(size(), AllocationStrategy::None);
+        if (maybe_clone.is_error())
+            return maybe_clone.error();
+
+        auto clone = maybe_clone.release_value();
         clone->m_volatile = true;
         return clone;
     }
@@ -38,7 +40,7 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
 
     auto committed_pages = MM.commit_user_physical_pages(new_cow_pages_needed);
     if (!committed_pages.has_value())
-        return {};
+        return ENOMEM;
 
     // Create or replace the committed cow pages. When cloning a previously
     // cloned vmobject, we want to essentially "fork", leaving us and the
@@ -49,11 +51,12 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
 
     auto new_shared_committed_cow_pages = try_create<SharedCommittedCowPages>(committed_pages.release_value());
     if (!new_shared_committed_cow_pages)
-        return {};
+        return ENOMEM;
 
-    auto clone = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this, *new_shared_committed_cow_pages));
-    if (!clone)
-        return {};
+    auto maybe_clone = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(*this, new_shared_committed_cow_pages.release_nonnull()));
+    if (!maybe_clone)
+        return ENOMEM;
+    auto clone = maybe_clone.release_nonnull();
 
     m_shared_committed_cow_pages = move(new_shared_committed_cow_pages);
@@ -76,52 +79,58 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
     return clone;
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_size(size_t size, AllocationStrategy strategy)
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
         committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
         if (!committed_pages.has_value())
-            return {};
+            return ENOMEM;
     }
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
+
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_physically_contiguous_with_size(size_t size)
 {
     auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
     if (contiguous_physical_pages.is_empty())
-        return {};
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
+        return ENOMEM;
+
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(contiguous_physical_pages.span()));
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy strategy)
 {
     Optional<CommittedPhysicalPageSet> committed_pages;
     if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
         committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
         if (!committed_pages.has_value())
-            return {};
+            return ENOMEM;
     }
+
     auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
     if (!vmobject)
-        return {};
+        return ENOMEM;
+
     vmobject->m_purgeable = true;
-    return vmobject;
+    return vmobject.release_nonnull();
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
 {
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(physical_pages));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(physical_pages));
 }
 
-RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
+KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
 {
     if (paddr.offset(size) < paddr) {
         dbgln("Shenanigans! try_create_for_physical_range({}, {}) would wrap around", paddr, size);
-        return nullptr;
+        // Since we can't wrap around yet, let's pretend to OOM.
+        return ENOMEM;
     }
-    return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(paddr, size));
+
+    return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, size));
 }
 
 AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
diff --git a/Kernel/Memory/AnonymousVMObject.h b/Kernel/Memory/AnonymousVMObject.h
index 358dc3a49c..04edca4314 100644
--- a/Kernel/Memory/AnonymousVMObject.h
+++ b/Kernel/Memory/AnonymousVMObject.h
@@ -18,12 +18,12 @@ class AnonymousVMObject final : public VMObject {
 public:
     virtual ~AnonymousVMObject() override;
 
-    static RefPtr<AnonymousVMObject> try_create_with_size(size_t, AllocationStrategy);
-    static RefPtr<AnonymousVMObject> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
-    static RefPtr<AnonymousVMObject> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
-    static RefPtr<AnonymousVMObject> try_create_purgeable_with_size(size_t, AllocationStrategy);
-    static RefPtr<AnonymousVMObject> try_create_physically_contiguous_with_size(size_t);
-    virtual RefPtr<VMObject> try_clone() override;
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_size(size_t, AllocationStrategy);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_purgeable_with_size(size_t, AllocationStrategy);
+    static KResultOr<NonnullRefPtr<AnonymousVMObject>> try_create_physically_contiguous_with_size(size_t);
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
 
     [[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
     PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
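Several hunks above swap adopt_ref_if_nonnull() plus an explicit null check for adopt_nonnull_ref_or_enomem(), which folds the "did operator new(nothrow) return null?" check into the error-or-value return. Roughly, the helper behaves like this standalone sketch (toy pointer-or-errno types in place of the kernel's NonnullRefPtr and KResultOr):

    #include <cassert>
    #include <new>

    template<typename T>
    struct PtrOrError {
        T* ptr { nullptr };
        int error { 0 };
        bool is_error() const { return ptr == nullptr; }
    };

    constexpr int ENOMEM_LIKE = 12;

    // If the nothrow allocation failed, produce ENOMEM; otherwise adopt the
    // pointer. Call sites shrink from three lines to one expression.
    template<typename T>
    PtrOrError<T> adopt_nonnull_or_enomem(T* object)
    {
        if (!object)
            return { nullptr, ENOMEM_LIKE };
        return { object, 0 };
    }

    struct Widget {
        int x { 1 };
    };

    int main()
    {
        auto maybe_widget = adopt_nonnull_or_enomem(new (std::nothrow) Widget);
        assert(!maybe_widget.is_error());
        delete maybe_widget.ptr;
    }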
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index 6129764384..62f4907a9c 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -706,38 +706,39 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, Str
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    auto vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
-    if (!vmobject) {
+    auto maybe_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
+    if (maybe_vmobject.is_error()) {
         kernel_page_directory().range_allocator().deallocate(range.value());
+        // FIXME: Would be nice to be able to return a KResultOr from here.
         return {};
     }
-    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), maybe_vmobject.release_value(), name, access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    auto vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
-    if (!vm_object)
+    auto maybe_vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
+    if (maybe_vm_object.is_error())
         return {};
     ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), vm_object.release_nonnull(), name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
-    auto vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
-    if (!vm_object)
+    auto maybe_vm_object = AnonymousVMObject::try_create_for_physical_range(paddr, size);
+    if (maybe_vm_object.is_error())
        return {};
     VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
diff --git a/Kernel/Memory/PrivateInodeVMObject.cpp b/Kernel/Memory/PrivateInodeVMObject.cpp
index 6c3b3e8258..0c3aa9f981 100644
--- a/Kernel/Memory/PrivateInodeVMObject.cpp
+++ b/Kernel/Memory/PrivateInodeVMObject.cpp
@@ -14,9 +14,9 @@ RefPtr<PrivateInodeVMObject> PrivateInodeVMObject::try_create_with_inode(Inode&
     return adopt_ref_if_nonnull(new (nothrow) PrivateInodeVMObject(inode, inode.size()));
 }
 
-RefPtr<VMObject> PrivateInodeVMObject::try_clone()
+KResultOr<NonnullRefPtr<VMObject>> PrivateInodeVMObject::try_clone()
 {
-    return adopt_ref_if_nonnull(new (nothrow) PrivateInodeVMObject(*this));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) PrivateInodeVMObject(*this));
 }
 
 PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, size_t size)
diff --git a/Kernel/Memory/PrivateInodeVMObject.h b/Kernel/Memory/PrivateInodeVMObject.h
index 4d8438ac92..a664c219a8 100644
--- a/Kernel/Memory/PrivateInodeVMObject.h
+++ b/Kernel/Memory/PrivateInodeVMObject.h
@@ -19,7 +19,7 @@ public:
     virtual ~PrivateInodeVMObject() override;
 
     static RefPtr<PrivateInodeVMObject> try_create_with_inode(Inode&);
-    virtual RefPtr<VMObject> try_clone() override;
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
 
 private:
     virtual bool is_private_inode() const override { return true; }
diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp
index 8dbb410469..5865316b2d 100644
--- a/Kernel/Memory/Region.cpp
+++ b/Kernel/Memory/Region.cpp
@@ -75,14 +75,14 @@ OwnPtr<Region> Region::clone()
     if (vmobject().is_inode())
         VERIFY(vmobject().is_private_inode());
 
-    auto vmobject_clone = vmobject().try_clone();
-    if (!vmobject_clone)
+    auto maybe_vmobject_clone = vmobject().try_clone();
+    if (maybe_vmobject_clone.is_error())
         return {};
 
     // Set up a COW region. The parent (this) region becomes COW as well!
     remap();
 
     auto clone_region = Region::try_create_user_accessible(
-        m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name ? m_name->try_clone() : OwnPtr<KString> {}, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
+        m_range, maybe_vmobject_clone.release_value(), m_offset_in_vmobject, m_name ? m_name->try_clone() : OwnPtr<KString> {}, access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
     if (!clone_region) {
         dbgln("Region::clone: Unable to allocate new Region for COW");
         return nullptr;
diff --git a/Kernel/Memory/ScatterGatherList.cpp b/Kernel/Memory/ScatterGatherList.cpp
index 3151417607..60418985b5 100644
--- a/Kernel/Memory/ScatterGatherList.cpp
+++ b/Kernel/Memory/ScatterGatherList.cpp
@@ -10,10 +10,12 @@ namespace Kernel::Memory {
 
 RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest& request, Span<NonnullRefPtr<PhysicalPage>> allocated_pages, size_t device_block_size)
 {
-    auto vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
-    if (!vm_object)
+    auto maybe_vm_object = AnonymousVMObject::try_create_with_physical_pages(allocated_pages);
+    if (maybe_vm_object.is_error()) {
+        // FIXME: Would be nice to be able to return a KResultOr here.
         return {};
-    return adopt_ref_if_nonnull(new (nothrow) ScatterGatherList(vm_object.release_nonnull(), request, device_block_size));
+    }
+    return adopt_ref_if_nonnull(new (nothrow) ScatterGatherList(maybe_vm_object.release_value(), request, device_block_size));
 }
 
 ScatterGatherList::ScatterGatherList(NonnullRefPtr<AnonymousVMObject> vm_object, AsyncBlockDeviceRequest& request, size_t device_block_size)
diff --git a/Kernel/Memory/SharedInodeVMObject.cpp b/Kernel/Memory/SharedInodeVMObject.cpp
index dd78f80208..cfeda69841 100644
--- a/Kernel/Memory/SharedInodeVMObject.cpp
+++ b/Kernel/Memory/SharedInodeVMObject.cpp
@@ -21,9 +21,9 @@ RefPtr<SharedInodeVMObject> SharedInodeVMObject::try_create_with_inode(Inode& in
     return vmobject;
 }
 
-RefPtr<VMObject> SharedInodeVMObject::try_clone()
+KResultOr<NonnullRefPtr<VMObject>> SharedInodeVMObject::try_clone()
 {
-    return adopt_ref_if_nonnull(new (nothrow) SharedInodeVMObject(*this));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) SharedInodeVMObject(*this));
 }
 
 SharedInodeVMObject::SharedInodeVMObject(Inode& inode, size_t size)
diff --git a/Kernel/Memory/SharedInodeVMObject.h b/Kernel/Memory/SharedInodeVMObject.h
index 047d335074..6690c3a2d7 100644
--- a/Kernel/Memory/SharedInodeVMObject.h
+++ b/Kernel/Memory/SharedInodeVMObject.h
@@ -17,7 +17,7 @@ class SharedInodeVMObject final : public InodeVMObject {
 public:
     static RefPtr<SharedInodeVMObject> try_create_with_inode(Inode&);
-    virtual RefPtr<VMObject> try_clone() override;
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() override;
 
 private:
     virtual bool is_shared_inode() const override { return true; }
diff --git a/Kernel/Memory/VMObject.h b/Kernel/Memory/VMObject.h
index 7f81f5fc30..641ab5b16f 100644
--- a/Kernel/Memory/VMObject.h
+++ b/Kernel/Memory/VMObject.h
@@ -33,7 +33,7 @@ class VMObject : public RefCounted<VMObject>
 public:
     virtual ~VMObject();
 
-    virtual RefPtr<VMObject> try_clone() = 0;
+    virtual KResultOr<NonnullRefPtr<VMObject>> try_clone() = 0;
 
     virtual bool is_anonymous() const { return false; }
     virtual bool is_inode() const { return false; }
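With VMObject.h changed, try_clone() is a pure virtual whose every override must now report *why* cloning failed rather than returning a null RefPtr. The shape of such a hierarchy, reduced to standard C++ (std::shared_ptr and an int error code standing in for NonnullRefPtr and KResult; illustrative only):

    #include <cassert>
    #include <memory>
    #include <variant>

    // Base class declares an error-or-value clone; each subclass override
    // either produces a non-null object or a specific error code.
    struct Base {
        virtual ~Base() = default;
        virtual std::variant<int, std::shared_ptr<Base>> try_clone() = 0;
    };

    struct AlwaysClones : Base {
        std::variant<int, std::shared_ptr<Base>> try_clone() override
        {
            return std::make_shared<AlwaysClones>();
        }
    };

    struct NeverClones : Base {
        std::variant<int, std::shared_ptr<Base>> try_clone() override
        {
            return 12; // ENOMEM-style code; the caller sees the reason.
        }
    };

    int main()
    {
        AlwaysClones good;
        NeverClones bad;
        assert(std::holds_alternative<std::shared_ptr<Base>>(good.try_clone()));
        assert(std::holds_alternative<int>(bad.try_clone()));
    }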
diff --git a/Kernel/Syscalls/anon_create.cpp b/Kernel/Syscalls/anon_create.cpp
index 6e3e252202..4ea569c973 100644
--- a/Kernel/Syscalls/anon_create.cpp
+++ b/Kernel/Syscalls/anon_create.cpp
@@ -29,11 +29,11 @@ KResultOr<FlatPtr> Process::sys$anon_create(size_t size, int options)
     if (new_fd_or_error.is_error())
         return new_fd_or_error.error();
     auto new_fd = new_fd_or_error.release_value();
-    auto vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(size, AllocationStrategy::Reserve);
-    if (!vmobject)
-        return ENOMEM;
+    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(size, AllocationStrategy::Reserve);
+    if (maybe_vmobject.is_error())
+        return maybe_vmobject.error();
 
-    auto anon_file = AnonymousFile::create(vmobject.release_nonnull());
+    auto anon_file = AnonymousFile::create(maybe_vmobject.release_value());
     if (!anon_file)
         return ENOMEM;
     auto description_or_error = FileDescription::create(*anon_file);
diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp
index 742b8dd385..4ad3550bb2 100644
--- a/Kernel/Syscalls/mmap.cpp
+++ b/Kernel/Syscalls/mmap.cpp
@@ -219,12 +219,17 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
     if (map_anonymous) {
         auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
         RefPtr<Memory::VMObject> vmobject;
-        if (flags & MAP_PURGEABLE)
-            vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(Memory::page_round_up(size), strategy);
-        else
-            vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy);
-        if (!vmobject)
-            return ENOMEM;
+        if (flags & MAP_PURGEABLE) {
+            auto maybe_vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(Memory::page_round_up(size), strategy);
+            if (maybe_vmobject.is_error())
+                return maybe_vmobject.error();
+            vmobject = maybe_vmobject.release_value();
+        } else {
+            auto maybe_vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy);
+            if (maybe_vmobject.is_error())
+                return maybe_vmobject.error();
+            vmobject = maybe_vmobject.release_value();
+        }
 
         auto region_or_error = address_space().allocate_region_with_vmobject(range.value(), vmobject.release_nonnull(), 0, {}, prot, map_shared);
         if (region_or_error.is_error())
            return region_or_error.error().error();
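Finally, the two syscalls. From userspace nothing changes shape; failures simply arrive as the specific errno the kernel now propagates. A hypothetical Serenity userspace program exercising both paths (this assumes that era's LibC, with MAP_PURGEABLE in <sys/mman.h> and anon_create() in <serenity.h>):

    #include <fcntl.h>
    #include <serenity.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main()
    {
        // sys$mmap: an anonymous, purgeable mapping (the MAP_PURGEABLE
        // branch above). Serenity conventionally passes fd 0 for anonymous
        // mappings; the fd is ignored with MAP_ANONYMOUS.
        void* purgeable = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
            MAP_ANONYMOUS | MAP_PRIVATE | MAP_PURGEABLE, 0, 0);
        if (purgeable == MAP_FAILED) {
            perror("mmap"); // e.g. ENOMEM, as forwarded by KResultOr
            return 1;
        }

        // sys$anon_create: an anonymous file backed by a purgeable VMObject.
        int fd = anon_create(4096, O_CLOEXEC);
        if (fd < 0) {
            perror("anon_create");
            return 1;
        }
        return 0;
    }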